source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
grid_refinement.c | //
// Created by sachetto on 30/09/17.
//
#include "grid.h"
#include "../../single_file_libraries/stb_ds.h"
/**
 * Decides if the grid should be refined by traversing the whole grid. A cell
 * is refined only if it is active, allowed to change, strictly larger than
 * the requested minimum discretization in every direction, and the highest
 * of the fluxes coming into it is at least refinement_bound. Sweeps are
 * repeated until no cell qualifies anymore.
 *
 * @param the_grid         Grid to refine (must not be NULL).
 * @param refinement_bound Minimum flux required for a cell to be refined.
 * @param min_dx           Minimum allowed cell size along x (must be > 0).
 * @param min_dy           Minimum allowed cell size along y (must be > 0).
 * @param min_dz           Minimum allowed cell size along z (must be > 0).
 * @return true if at least one cell was refined, false otherwise.
 */
bool refine_grid_with_bound(struct grid *the_grid, real_cpu refinement_bound, real_cpu min_dx, real_cpu min_dy,
                            real_cpu min_dz) {

    // FIX: validate the_grid before dereferencing it below (sibling
    // refine_grid() performs the equivalent check).
    if(the_grid == NULL) {
        fprintf(stderr, "refine_grid_with_bound(): Parameter the_grid can't be null.\n");
        return false;
    }

    // FIX: the original diagnostics named refine_grid() instead of this
    // function and were missing the trailing newline.
    if(min_dx <= 0.0) {
        fprintf(stderr, "refine_grid_with_bound(): Parameter min_dx must be positive, passed %lf.\n", min_dx);
        return false;
    }
    if(min_dy <= 0.0) {
        fprintf(stderr, "refine_grid_with_bound(): Parameter min_dy must be positive, passed %lf.\n", min_dy);
        return false;
    }
    if(min_dz <= 0.0) {
        fprintf(stderr, "refine_grid_with_bound(): Parameter min_dz must be positive, passed %lf.\n", min_dz);
        return false;
    }

    struct cell_node *grid_cell, *auxiliar_grid_cell;
    real_cpu maximum_flux;

    bool continue_refining = true;
    bool refined_once = false;

    // Recompute the directional fluxes for every active cell before testing
    // them against refinement_bound.
    set_grid_flux(the_grid);

    uint32_t *free_sv_pos = the_grid->free_sv_positions;
    arrsetlen(the_grid->refined_this_step, 0);

    while(continue_refining) {
        continue_refining = false;
        grid_cell = the_grid->first_cell;
        while(grid_cell != 0) {
            maximum_flux = get_cell_maximum_flux(grid_cell);
            if((grid_cell->can_change && grid_cell->active) && (grid_cell->discretization.x > min_dx) &&
               (grid_cell->discretization.y > min_dy) && (grid_cell->discretization.z > min_dz) &&
               (maximum_flux >= refinement_bound)) {
                // Advance before refining: refine_cell() rewires the list
                // around the current node, invalidating the pointer.
                auxiliar_grid_cell = grid_cell;
                grid_cell = grid_cell->next;
                refine_cell(auxiliar_grid_cell, free_sv_pos, &(the_grid->refined_this_step));
                the_grid->number_of_cells += 7; // one cell becomes eight children
                continue_refining = true;
                refined_once = true;
            } else {
                grid_cell = grid_cell->next;
            }
        }
    }
    return refined_once;
}
/* Performs num_steps unconditional refinement sweeps over the whole grid.
   Every active cell that is allowed to change is refined once per sweep. */
void refine_grid(struct grid *the_grid, int num_steps) {

    if(the_grid == NULL) {
        fprintf(stderr, "refine_grid(): Parameter the_grid can't be null. Exiting!");
        exit(10);
    }

    for(int step = 0; step < num_steps; step++) {
        struct cell_node *cell = the_grid->first_cell;
        while(cell != NULL) {
            // Capture the successor first: refine_cell() rewires the linked
            // list, so the children created here are skipped this sweep.
            struct cell_node *next_cell = cell->next;
            if(cell->can_change && cell->active) {
                refine_cell(cell, NULL, NULL);
                the_grid->number_of_cells += 7;
            }
            cell = next_cell;
        }
    }
}
/* Refines a single cell in place: the cell is replaced by its children,
   adding seven cells to the grid's total count. Exits with code 10 when
   either argument is NULL (consistent with refine_grid()). */
void refine_grid_cell(struct grid *the_grid, struct cell_node *grid_cell) {

    // FIX: the original only validated grid_cell; a NULL the_grid would
    // crash on the number_of_cells update below.
    if(!the_grid) {
        fprintf(stderr, "refine_grid_cell: the_grid is NULL.\n");
        exit(10);
    }

    if(!grid_cell) {
        fprintf(stderr, "refine_grid_cell: grid_cell is NULL.\n");
        exit(10);
    }

    refine_cell(grid_cell, NULL, NULL);
    the_grid->number_of_cells += 7;
}
/* Resets and recomputes the six directional fluxes of every active cell.
   Pass 1 zeroes all fluxes, pass 2 recomputes them; the passes are kept
   separate, presumably because set_cell_flux() also touches neighbouring
   cells' flux fields -- TODO confirm against set_cell_flux().  */
void set_grid_flux(struct grid *the_grid) {

    uint32_t active_cells = the_grid->num_active_cells;
    struct cell_node **ac = the_grid->active_cells;

    // NOTE(review): 'i' is int while active_cells is uint32_t; correct for
    // grids below 2^31 cells, but the comparison mixes signedness.
    int i;

#pragma omp parallel for
    for(i = 0; i < active_cells; i++) {
        // Clear all six directional fluxes before recomputation.
        ac[i]->north_flux = 0.0;
        ac[i]->south_flux = 0.0;
        ac[i]->east_flux = 0.0;
        ac[i]->west_flux = 0.0;
        ac[i]->front_flux = 0.0;
        ac[i]->back_flux = 0.0;
    }

#pragma omp parallel for
    for(i = 0; i < active_cells; i++) {
        set_cell_flux(ac[i], 's'); // Computes south flux.
        set_cell_flux(ac[i], 'n'); // Computes north flux.
        set_cell_flux(ac[i], 'e'); // Computes east flux.
        set_cell_flux(ac[i], 'w'); // Computes west flux.
        set_cell_flux(ac[i], 'f'); // Computes front flux.
        set_cell_flux(ac[i], 'b'); // Computes back flux.
    }
}
GB_binop__gt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint8)
// A*D function (colscale): GB (_AxD__gt_uint8)
// D*A function (rowscale): GB (_DxB__gt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint8)
// C=scalar+B GB (_bind1st__gt_uint8)
// C=scalar+B' GB (_bind1st_tran__gt_uint8)
// C=A+scalar GB (_bind2nd__gt_uint8)
// C=A'+scalar GB (_bind2nd_tran__gt_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GT is not in that list, so this dense ewise3-accum kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.
GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Kernel body comes from the shared template, specialized by the GB_*
    // macros defined above for the GT_UINT8 operator.
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
GrB_Info GB (_Cdense_accumB__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Kernel body compiled out (#if 0); presumably GT cannot act as an
    // accumulator since C is bool while B is uint8_t -- the generator
    // still emits the stub, which reports success without doing work.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__gt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Kernel body compiled out (#if 0), same reason as _Cdense_accumB:
    // the stub reports success without modifying C.
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D.
GrB_Info GB (_AxD__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds bool results of aij > djj; values come from the template.
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D.
GrB_Info GB (_DxB__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, entries present in either A or B.
GrB_Info GB (_AaddB__gt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Workspaces for slicing M, A, and B; released by GB_FREE_WORK below.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, entries present in both A and B.
GrB_Info GB (_AemultB_01__gt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for GT (see macro above): GT has a flipped
    // counterpart (LT), so only the non-flipped branch is compiled here.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__gt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with a bound first scalar,
// cij = (x > bij) for every entry of B.
GrB_Info GB (_bind1st__gt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB/GBX are the bitmap-test and value-access macros (presumably
        // defined in GB.h; GBX's last arg selects iso-valued access).
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with a bound second scalar,
// cij = (aij > y) for every entry of A.
GrB_Info GB (_bind2nd__gt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the operator with x bound first.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ;               \
}

GrB_Info GB (_bind1st_tran__gt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // Re-establish GB_ATYPE for code following this function (preprocessor
    // directives are position-, not control-flow-, dependent).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the operator with y bound second.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ;               \
}

GrB_Info GB (_bind2nd_tran__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mxEvaluateStrongFromEdgeRHS.c | #ifdef _OPENMP
#include <omp.h>
#endif
#include "mex.h"
#include "blas.h"
// #if !defined(_WIN32)
// #define dgemm dgemm_
// #endif
#define DEBUG 0
#define NRHS 10
#define NLHS 1
/*
 * MEX gateway: evaluates the strong-form right-hand side contribution of
 * edge fluxes for a discontinuous-Galerkin solver and divides by the
 * element Jacobian after applying the inverse mass matrix.
 *
 * Inputs  (prhs): invM, Mb, FToE, FToN1, FToN2, Js, J, fluxM, fluxP, fluxS
 * Output  (plhs): frhs, a (Np x K x Nfield) double array.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    /* check input & output */
    if (nrhs != NRHS) {
        mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
        mexPrintf("%d inputs required.\n", NRHS);
        return; /* FIX: previously fell through and read invalid prhs[] */
    }
    if (nlhs != NLHS) {
        mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
        mexPrintf("%d outputs required.\n", NLHS); /* FIX: message said "inputs" */
        return; /* FIX: bail out instead of writing to an invalid plhs[] */
    }

    double *invM = mxGetPr(prhs[0]);  /* inverse mass matrix (Np x Np) -- TODO confirm */
    double *Mb = mxGetPr(prhs[1]);    /* edge mass matrix (Nfp x Nfp) -- TODO confirm */
    double *FToE = mxGetPr(prhs[2]);  /* face-to-element map, 1-based element ids */
    double *FToN1 = mxGetPr(prhs[3]); /* face nodes on the local element, 1-based */
    double *FToN2 = mxGetPr(prhs[4]); /* face nodes on the adjacent element, 1-based */
    double *Js = mxGetPr(prhs[5]);    /* edge Jacobian per face node */
    double *J = mxGetPr(prhs[6]);     /* element Jacobian per interp node */
    double *fluxM = mxGetPr(prhs[7]); /* local numerical flux */
    double *fluxP = mxGetPr(prhs[8]); /* adjacent numerical flux */
    double *fluxS = mxGetPr(prhs[9]); /* surface (central/upwind) flux */

    // dims = mxGetDimensions(prhs[6]);
    const int Np = mxGetM(prhs[6]); // num of interp nodes
    const int K = mxGetN(prhs[6]);  // num of elements

    const mwSize *dims = mxGetDimensions(prhs[7]);
    const int Nfp = dims[0];
    const int Ne = dims[1]; // num of edges

    int Nfield;
    if (mxGetNumberOfDimensions(prhs[7]) > 2) {
        Nfield = dims[2];
    } else {
        Nfield = 1; // fluxM is a 2D matrix
    }

    const size_t ndimOut = 3;
    const mwSize dimOut[3] = {Np, K, Nfield};
    plhs[0] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
    double *frhs = mxGetPr(plhs[0]);

    char *chn = "N";
    double one = 1.0, zero = 0.0;
    ptrdiff_t oneI = 1;
    ptrdiff_t np = Np;

    /* Parallelism is over fields only: each iteration writes a disjoint
       Np*K slice of frhs, so there is no data race between threads.
       NOTE(review): DG_THREADS is expected to be defined at compile time. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
    for (int fld = 0; fld < Nfield; fld++) {
        double *rhs = frhs + Np * K * fld;
        double *fluxM_ = fluxM + Nfp * Ne * fld;
        double *fluxP_ = fluxP + Nfp * Ne * fld;
        double *fluxS_ = fluxS + Nfp * Ne * fld;

        for (int k = 0; k < Ne; k++) { // evaluate rhs on each edge
            const int e1 = (int)FToE[2 * k] - 1;
            const int e2 = (int)FToE[2 * k + 1] - 1;
            /* -1 converts the 1-based FToN1/FToN2 node ids when they are
               added below. */
            const int ind1 = e1 * Np - 1;
            const int ind2 = e2 * Np - 1;
            const int ind = k * Nfp;

            double rhsM[Nfp], rhsP[Nfp]; /* VLAs: per-edge scatter buffers */
            for (int n = 0; n < Nfp; n++) {
                rhsM[n] = 0;
                rhsP[n] = 0;
            }

            /* Accumulate the edge mass matrix applied to the flux jumps. */
            for (int n = 0; n < Nfp; n++) {
                const int sk = n + ind;
                double dfM = fluxM_[sk] - fluxS_[sk];
                double dfP = fluxP_[sk] - fluxS_[sk];
                double j = Js[sk];
                double *mb = Mb + n * Nfp; /* n-th column of Mb */
                for (int m = 0; m < Nfp; m++) {
                    rhsM[m] += mb[m] * j * dfM;
                    rhsP[m] -= mb[m] * j * dfP;
                }
            }

            /* Scatter the edge contributions into the two elements. */
            for (int n = 0; n < Nfp; n++) {
                const int sk = n + ind;
                const int m1 = (int)FToN1[sk] + ind1;
                const int m2 = (int)FToN2[sk] + ind2;
                rhs[m1] += rhsM[n];
                rhs[m2] += rhsP[n];
            }
        }

        /* Apply invM element-by-element and divide by the Jacobian. */
        double temp[Np];
        for (int k = 0; k < K; k++) {
            double *rhs_ = rhs + k * Np;
            double *j = J + k * Np;
            dgemm(chn, chn, &np, &oneI, &np, &one, invM, &np, rhs_, &np, &zero, temp,
                  &np);
            // copy rhs
            for (int n = 0; n < Np; n++) {
                rhs_[n] = temp[n] / j[n];
            }
        }
    }
    return;
}
c-omp.c | /* This file contains routines to construct OpenACC and OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005-2015 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "c-common.h"
#include "c-pragma.h"
#include "gimple-expr.h"
#include "langhooks.h"
#include "omp-low.h"
#include "gomp-constants.h"
/* Complete a #pragma oacc wait construct. LOC is the location of
the #pragma. */
tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  /* Room for the async argument, the argument count, and one slot per
     wait expression.  */
  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  /* First argument: the async queue, or GOMP_ASYNC_SYNC when no async
     clause is present.  NOTE(review): the expr is read off the head of
     CLAUSES, not off the clause located by find_omp_clause -- presumably
     the async clause is guaranteed to be first here; confirm in caller.  */
  if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
  args->quick_push (t);

  /* Second argument: how many wait expressions follow.  */
  args->quick_push (build_int_cst (integer_type_node, nparms));

  /* Push each wait expression, folding constants to plain int_cst.  */
  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);
  add_stmt (stmt);

  vec_free (args);

  return stmt;
}
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree master_node = build1 (OMP_MASTER, void_type_node, stmt);
  master_node = add_stmt (master_node);
  SET_EXPR_LOCATION (master_node, loc);
  return master_node;
}
/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree taskgroup_node = build1 (OMP_TASKGROUP, void_type_node, stmt);
  taskgroup_node = add_stmt (taskgroup_node);
  SET_EXPR_LOCATION (taskgroup_node, loc);
  return taskgroup_node;
}
/* Complete a #pragma omp critical construct. STMT is the structured-block
that follows the pragma, NAME is the identifier in the pragma, or null
if it was omitted. LOC is the location of the #pragma. */
tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  /* Build the OMP_CRITICAL node by hand: body plus the (possibly null)
     critical-section name from the pragma.  */
  tree critical = make_node (OMP_CRITICAL);
  TREE_TYPE (critical) = void_type_node;
  OMP_CRITICAL_BODY (critical) = body;
  OMP_CRITICAL_NAME (critical) = name;
  SET_EXPR_LOCATION (critical, loc);
  return add_stmt (critical);
}
/* Complete a #pragma omp ordered construct. STMT is the structured-block
that follows the pragma. LOC is the location of the #pragma. */
tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  /* Wrap the structured block, tag the pragma location, and emit it.  */
  tree ordered_node = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (ordered_node, loc);
  return add_stmt (ordered_node);
}
/* Complete a #pragma omp barrier construct. LOC is the location of
the #pragma. */
void
c_finish_omp_barrier (location_t loc)
{
  /* A barrier lowers to a plain call to the libgomp runtime.  */
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskwait construct. LOC is the location of the
pragma. */
void
c_finish_omp_taskwait (location_t loc)
{
  /* taskwait lowers to a plain call to the libgomp runtime.  */
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskyield construct. LOC is the location of the
pragma. */
void
c_finish_omp_taskyield (location_t loc)
{
  /* taskyield lowers to a plain call to the libgomp runtime.  */
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
the expression to be implemented atomically is LHS opcode= RHS.
For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
opcode= RHS with the new or old content of LHS returned.
LOC is the location of the atomic statement. The value returned
is either error_mark_node (if the construct was erroneous) or an
OMP_ATOMIC* node which should be added to the current statement
tree with add_stmt. */
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  /* Propagate earlier parse errors.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* atomic read: V = *ADDR, nothing else to build.  */
  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      /* "x = e op x" was parsed; fold the full RHS so the update becomes
	 a plain store.  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may prepend a SAVE_EXPR; stash it in PRE and
	 re-attach it at the very end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && TREE_CODE (rhs1) == VAR_DECL
      && TREE_CODE (lhs) == VAR_DECL
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  /* Capture forms: additionally build V = <atomic op>.  */
  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  /* Keep RHS1's address evaluated for its side effects only.  */
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach the SAVE_EXPR split off from build_modify_expr above.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
/* Complete a #pragma omp flush construct. We don't do anything with
the variable list that the syntax allows. LOC is the location of
the #pragma. */
void
c_finish_omp_flush (location_t loc)
{
  /* flush lowers to a full memory synchronization builtin.  */
  tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Check and canonicalize OMP_FOR increment expression.
Helper function for c_finish_omp_for. */
/* Check and canonicalize the OMP_FOR increment expression EXP for the
   iteration variable DECL: rewrite EXP into "DECL + <step>" form and
   return the <step> part, or error_mark_node when EXP is not a
   recognized increment of DECL.  Recurses through conversions and
   additive expressions.  */
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* The increment must be integral and at least as wide as DECL.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear on the left of a subtraction.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* For addition, DECL may appear on either side.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
/* If the OMP_FOR increment expression in INCR is of pointer type,
canonicalize it into an expression handled by gimplify_omp_for()
and return it. DECL is the iteration variable. */
/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  /* Non-pointer iterators (or increments without an operand) pass
     through unchanged.  */
  if (!POINTER_TYPE_P (TREE_TYPE (decl)) || !TREE_OPERAND (incr, 1))
    return incr;

  tree offset = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1));
  if (TREE_CODE (incr) == POSTDECREMENT_EXPR
      || TREE_CODE (incr) == PREDECREMENT_EXPR)
    offset = fold_build1_loc (loc, NEGATE_EXPR, sizetype, offset);
  tree new_value = fold_build_pointer_plus (decl, offset);
  return build2 (MODIFY_EXPR, void_type_node, decl, new_value);
}
/* Validate and generate OMP_FOR.
DECLV is a vector of iteration variables, for each collapsed loop.
INITV, CONDV and INCRV are vectors containing initialization
expressions, controlling predicates and increment expressions.
BODY is the body of the loop and PRE_BODY statements that go before
the loop. */
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
tree initv, tree condv, tree incrv, tree body, tree pre_body)
{
location_t elocus;
bool fail = false;
int i;
/* Cilk loops have extra restrictions, checked up front.  */
if ((code == CILK_SIMD || code == CILK_FOR)
&& !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
fail = true;
/* All four vectors must describe the same set of collapsed loops.  */
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
/* Validate and canonicalize each collapsed loop's decl/init/cond/incr.
   Diagnostics are emitted per loop; FAIL accumulates across all of them.  */
for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
tree decl = TREE_VEC_ELT (declv, i);
tree init = TREE_VEC_ELT (initv, i);
tree cond = TREE_VEC_ELT (condv, i);
tree incr = TREE_VEC_ELT (incrv, i);
elocus = locus;
if (EXPR_HAS_LOCATION (init))
elocus = EXPR_LOCATION (init);
/* Validate the iteration variable. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
&& TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
{
error_at (elocus, "invalid type for iteration variable %qE", decl);
fail = true;
}
/* In the case of "for (int i = 0...)", init will be a decl. It should
have a DECL_INITIAL that we can turn into an assignment. */
if (init == decl)
{
elocus = DECL_SOURCE_LOCATION (decl);
init = DECL_INITIAL (decl);
if (init == NULL)
{
error_at (elocus, "%qE is not initialized", decl);
init = integer_zero_node;
fail = true;
}
init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
/* FIXME diagnostics: This should
be the location of the INIT. */
elocus,
init,
NULL_TREE);
}
if (init != error_mark_node)
{
gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (init, 0) == decl);
}
if (cond == NULL_TREE)
{
error_at (elocus, "missing controlling predicate");
fail = true;
}
else
{
bool cond_ok = false;
if (EXPR_HAS_LOCATION (cond))
elocus = EXPR_LOCATION (cond);
if (TREE_CODE (cond) == LT_EXPR
|| TREE_CODE (cond) == LE_EXPR
|| TREE_CODE (cond) == GT_EXPR
|| TREE_CODE (cond) == GE_EXPR
|| TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
tree op0 = TREE_OPERAND (cond, 0);
tree op1 = TREE_OPERAND (cond, 1);
/* 2.5.1. The comparison in the condition is computed in
the type of DECL, otherwise the behavior is undefined.
For example:
long n; int i;
i < n;
according to ISO will be evaluated as:
(long)i < n;
We want to force:
i < (int)n; */
if (TREE_CODE (op0) == NOP_EXPR
&& decl == TREE_OPERAND (op0, 0))
{
TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
TREE_OPERAND (cond, 1)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 1));
}
else if (TREE_CODE (op1) == NOP_EXPR
&& decl == TREE_OPERAND (op1, 0))
{
TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
TREE_OPERAND (cond, 0)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 0));
}
/* Canonicalize so DECL always ends up as the left operand.  */
if (decl == TREE_OPERAND (cond, 0))
cond_ok = true;
else if (decl == TREE_OPERAND (cond, 1))
{
TREE_SET_CODE (cond,
swap_tree_comparison (TREE_CODE (cond)));
TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
TREE_OPERAND (cond, 0) = decl;
cond_ok = true;
}
/* NE/EQ are only accepted when they can be rewritten as an
ordered comparison against the type's extreme value
(Cilk loops are more permissive).  */
if (TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
{
if (code != CILK_SIMD && code != CILK_FOR)
cond_ok = false;
}
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MIN_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? GT_EXPR : LE_EXPR);
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MAX_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? LT_EXPR : GE_EXPR);
else if (code != CILK_SIMD && code != CILK_FOR)
cond_ok = false;
}
}
if (!cond_ok)
{
error_at (elocus, "invalid controlling predicate");
fail = true;
}
}
if (incr == NULL_TREE)
{
error_at (elocus, "missing increment expression");
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
incr_ok = true;
incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
break;
case COMPOUND_EXPR:
if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
|| TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
break;
incr = TREE_OPERAND (incr, 1);
/* FALLTHRU */
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
|| (TREE_CODE (TREE_OPERAND (incr, 1))
== POINTER_PLUS_EXPR))
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
/* Last resort: try to massage more complex RHS forms into
DECL = DECL + EXPR via check_omp_for_incr_expr.  */
tree t = check_omp_for_incr_expr (elocus,
TREE_OPERAND (incr, 1),
decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
break;
default:
break;
}
if (!incr_ok)
{
error_at (elocus, "invalid increment expression");
fail = true;
}
}
/* Store back the possibly rewritten init/incr expressions.  */
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
return NULL;
else
{
/* Everything validated: build and emit the OMP_FOR-family node.  */
tree t = make_node (code);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = initv;
OMP_FOR_COND (t) = condv;
OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
SET_EXPR_LOCATION (t, locus);
return add_stmt (t);
}
}
/* Right now we have 14 different combined constructs, this
function attempts to split or duplicate clauses for combined
constructs. CODE is the innermost construct in the combined construct,
and MASK allows to determine which constructs are combined together,
as every construct has at least one clause that no other construct
has (except for OMP_SECTIONS, but that can be only combined with parallel).
Combined constructs are:
#pragma omp parallel for
#pragma omp parallel sections
#pragma omp parallel for simd
#pragma omp for simd
#pragma omp distribute simd
#pragma omp distribute parallel for
#pragma omp distribute parallel for simd
#pragma omp teams distribute
#pragma omp teams distribute parallel for
#pragma omp teams distribute parallel for simd
#pragma omp target teams
#pragma omp target teams distribute
#pragma omp target teams distribute parallel for
#pragma omp target teams distribute parallel for simd */
void
c_omp_split_clauses (location_t loc, enum tree_code code,
omp_clause_mask mask, tree clauses, tree *cclauses)
{
tree next, c;
enum c_omp_clause_split s;
int i;
/* Start with an empty clause list per leaf construct.  */
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
cclauses[i] = NULL;
/* Add implicit nowait clause on
#pragma omp parallel {for,for simd,sections}. */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
switch (code)
{
case OMP_FOR:
case OMP_SIMD:
cclauses[C_OMP_CLAUSE_SPLIT_FOR]
= build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
break;
case OMP_SECTIONS:
cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
= build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
break;
default:
break;
}
/* Walk the clause chain once; for each clause, S selects the leaf
   construct list onto which it is finally moved.  Clauses needed on
   several constructs are copied (build_omp_clause) before S is set.  */
for (; clauses ; clauses = next)
{
next = OMP_CLAUSE_CHAIN (clauses);
switch (OMP_CLAUSE_CODE (clauses))
{
/* First the clauses that are unique to some constructs. */
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_MAP:
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
case OMP_CLAUSE_DIST_SCHEDULE:
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_PROC_BIND:
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_ALIGNED:
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
/* Duplicate this to all of distribute, for and simd. */
case OMP_CLAUSE_COLLAPSE:
if (code == OMP_SIMD)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_COLLAPSE);
OMP_CLAUSE_COLLAPSE_EXPR (c)
= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
/* The schedule/dist_schedule mask bits tell whether a worksharing
   for resp. distribute is part of the combined construct.  */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_COLLAPSE);
OMP_CLAUSE_COLLAPSE_EXPR (c)
= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
}
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
/* Private clause is supported on all constructs but target,
it is enough to put it on the innermost one. For
#pragma omp {for,sections} put it on parallel though,
as that's what we did for OpenMP 3.1. */
case OMP_CLAUSE_PRIVATE:
switch (code)
{
case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
case OMP_FOR: case OMP_SECTIONS:
case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
default: gcc_unreachable ();
}
break;
/* Firstprivate clause is supported on all constructs but
target and simd. Put it on the outermost of those and
duplicate on parallel. */
case OMP_CLAUSE_FIRSTPRIVATE:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
{
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
| (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
s = C_OMP_CLAUSE_SPLIT_TEAMS;
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
}
else
/* This must be
#pragma omp parallel{, for{, simd}, sections}. */
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
}
else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
/* This must be one of
#pragma omp {,target }teams distribute
#pragma omp target teams
#pragma omp {,target }teams distribute simd. */
gcc_assert (code == OMP_DISTRIBUTE
|| code == OMP_TEAMS
|| code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_TEAMS;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
{
/* This must be #pragma omp distribute simd. */
gcc_assert (code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_TEAMS;
}
else
{
/* This must be #pragma omp for simd. */
gcc_assert (code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_FOR;
}
break;
/* Lastprivate is allowed on for, sections and simd. In
parallel {for{, simd},sections} we actually want to put it on
parallel rather than for or sections. */
case OMP_CLAUSE_LASTPRIVATE:
if (code == OMP_FOR || code == OMP_SECTIONS)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
}
gcc_assert (code == OMP_SIMD);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
/* for simd (possibly under parallel): duplicate the clause
   onto the enclosing worksharing/parallel construct too.  */
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
OMP_CLAUSE_CHAIN (c) = cclauses[s];
cclauses[s] = c;
}
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
/* Shared and default clauses are allowed on private and teams. */
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_DEFAULT:
if (code == OMP_TEAMS)
{
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_CODE (clauses));
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
else
OMP_CLAUSE_DEFAULT_KIND (c)
= OMP_CLAUSE_DEFAULT_KIND (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
}
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
/* Reduction is allowed on simd, for, parallel, sections and teams.
Duplicate it on all of them, but omit on for or sections if
parallel is present. */
case OMP_CLAUSE_REDUCTION:
if (code == OMP_SIMD)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
}
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
OMP_CLAUSE_REDUCTION_CODE (c)
= OMP_CLAUSE_REDUCTION_CODE (clauses);
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
s = C_OMP_CLAUSE_SPLIT_TEAMS;
}
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
else if (code == OMP_SECTIONS)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_TEAMS;
break;
case OMP_CLAUSE_IF:
/* FIXME: This is currently being discussed. */
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
!= 0)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else
s = C_OMP_CLAUSE_SPLIT_TARGET;
break;
default:
gcc_unreachable ();
}
/* Move the clause itself (not a copy) onto the chosen list S.  */
OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
cclauses[s] = clauses;
}
}
/* qsort callback to compare #pragma omp declare simd clauses. */
static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;

  /* Primary key: clause code, in decreasing order.  */
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    return OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b) ? -1 : 1;

  /* simdlen/inbranch/notinbranch carry no argument index, so equal
     codes there compare as equal.  */
  if (OMP_CLAUSE_CODE (a) == OMP_CLAUSE_SIMDLEN
      || OMP_CLAUSE_CODE (a) == OMP_CLAUSE_INBRANCH
      || OMP_CLAUSE_CODE (a) == OMP_CLAUSE_NOTINBRANCH)
    return 0;

  /* Secondary key: the argument index stored in the clause DECL,
     also in decreasing order.  */
  {
    int ai = tree_to_shwi (OMP_CLAUSE_DECL (a));
    int bi = tree_to_shwi (OMP_CLAUSE_DECL (b));
    if (ai != bi)
      return ai < bi ? 1 : -1;
  }
  return 0;
}
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
CLAUSES on FNDECL into argument indexes and sort them. */
tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      /* simdlen/inbranch/notinbranch have no associated parameter;
	 every other clause names a PARM_DECL which is replaced here by
	 its 0-based position within PARMS.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      /* Diagnostic wording fixed: "a function", not "an
		 function".  The erroneous clause is dropped from the
		 returned chain (no safe_push below).  */
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      /* Sort the clauses and rebuild the OMP_CLAUSE_CHAIN links in the
	 sorted order.  */
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  clvec.release ();
  return clauses;
}
/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  /* For every clause that carries an argument index (everything except
     simdlen/inbranch/notinbranch), walk DECL_ARGUMENTS of FNDECL to
     the parameter at that position and store the PARM_DECL back into
     the clause.  */
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMDLEN
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_INBRANCH
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NOTINBRANCH)
	continue;

      int idx = tree_to_shwi (OMP_CLAUSE_DECL (c));
      tree arg = DECL_ARGUMENTS (fndecl);
      for (int i = 0; arg && i != idx; i++)
	arg = TREE_CHAIN (arg);
      /* The index was produced from this very argument list, so it
	 must still be in range.  */
      gcc_assert (arg);
      OMP_CLAUSE_DECL (c) = arg;
    }
}
/* True if OpenMP sharing attribute of DECL is predetermined. */
enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member are
     predetermined shared; anything else is left for the regular
     default-clause handling.  */
  return (TREE_READONLY (decl)
	  ? OMP_CLAUSE_DEFAULT_SHARED
	  : OMP_CLAUSE_DEFAULT_UNSPECIFIED);
}
|
single_value.c | // RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 2 %t) %s.reference_output
#include <stdio.h>
#include <omp.h>
int main()
{
int x = 0;
/* Every thread in the team stores the same value into the shared
   variable, so 42 is observed after the region.  NOTE(review):
   unsynchronized concurrent stores are formally a data race even when
   the value is identical -- acceptable for this translation test.  */
#pragma omp parallel shared(x)
{
x = 42;
}
/* Output is diffed against %s.reference_output -- do not change.  */
printf("X: %d\n", x);
}
|
GB_unaryop__ainv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_uint64
// op(A') function: GB_tran__ainv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) unary op elementwise:
// Cx [p] = -((int8_t) Ax [p]) for p = 0..anz-1 (cast first, then negate,
// per GB_CASTING/GB_OP above), using up to `nthreads` OpenMP threads.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int8_uint64
(
int8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// each iteration is independent, so a static schedule is safe and cheap
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the actual loop lives in the shared template
// GB_unaryop_transpose.c, which expands using the GB_* macros defined
// above (phase 2 of 2 of the transpose).  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE.
GrB_Info GB_tran__ainv_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
// Reinterpret `src` under a new shape WITHOUT copying: the returned tensor
// aliases src.dptr_ and src.stream_.  Requires contiguous storage (checked).
// NOTE(review): target_shape presumably covers the same number of elements
// as src -- this is not checked here; confirm at call sites.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
// Operator parameters for topk (see the .describe() strings for the
// user-facing documentation of each field).
struct TopKParam : public dmlc::Parameter<TopKParam> {
dmlc::optional<int> axis;  // axis to select along; unset => flattened input
int k;  // number of elements to keep; k < 1 requests a global sort
int ret_typ;  // one of topk_enum::TopKReturnType
bool is_ascend;  // true: k smallest; false (default): k largest
int dtype;  // dtype of the returned indices (for "indices"/"both")
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select,"
" should be always smaller than or equal to the element number in the given axis."
" A global sort is performed if set k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.")
;
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
"An error will be raised if the selected data type cannot precisely represent the "
"indices.");
}
};
// Operator parameters for sort.
struct SortParam : public dmlc::Parameter<SortParam> {
dmlc::optional<int> axis;  // axis to sort along; unset => flattened input
bool is_ascend;  // sort direction; ascending by default
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
// Operator parameters for argsort.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
dmlc::optional<int> axis;  // axis to sort along; unset => flattened input
bool is_ascend;  // sort direction; ascending by default
int dtype;  // dtype used for the returned indices
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
" \"both\". An error will be raised if the selected data type cannot precisely "
"represent the indices.");
}
};
// Resolve the user-facing TopKParam against the input shape.
// Outputs: target_shape - shape of the value/indices output;
//          batch_size   - number of independent slices to select from;
//          element_num  - elements per slice (size along the chosen axis);
//          axis         - normalized (non-negative) axis, 0 if none given;
//          k            - effective k (element_num when param.k < 1);
//          do_transpose - whether the axis must be moved to the last dim;
//          is_ascend    - copied from param.
// Aborts via CHECK on an out-of-range axis or k.
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
                           int *batch_size, int *element_num, int *axis, int *k,
                           bool *do_transpose, bool *is_ascend) {
  *do_transpose = false;
  *k = param.k;
  *is_ascend = param.is_ascend;
  // get batch_size, axis and element_num
  if (!static_cast<bool>(param.axis)) {  // No axis given: treat input as one flat slice
    *axis = 0;
    *batch_size = 1;
    *element_num = src_shape.Size();
  } else {
    *axis = param.axis.value();
    if (*axis < 0) {
      *axis += src_shape.ndim();  // wrap negative axis
    }
    // Fixed message: the valid range is [0, ndim), the old text implied
    // axis == ndim was acceptable.
    CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
      << "Invalid axis! axis should satisfy 0 <= axis < "
      << src_shape.ndim() << ", found axis=" << *axis;
    *batch_size = src_shape.Size() / src_shape[*axis];
    *element_num = src_shape[*axis];
    // Selection works along the last dimension; any other axis requires
    // a transpose first.
    if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
      *do_transpose = true;
    }
  }
  // get k: non-positive k means "keep everything" (global sort)
  if (param.k <= 0) {
    *k = *element_num;
  }
  // get target_shape
  if (!static_cast<bool>(param.axis)) {
    if (param.ret_typ != topk_enum::kReturnMask) {
      *target_shape = mshadow::Shape1(*k);
    } else {
      *target_shape = src_shape;  // the mask keeps the input shape
    }
  } else {
    *target_shape = src_shape;
    if (param.ret_typ != topk_enum::kReturnMask) {
      (*target_shape)[*axis] = *k;
    }
  }
  // Fixed message: k == element_num is allowed, so say "in range
  // [1, element_num]" instead of "smaller than element_num".
  CHECK(*k >= 1 && *k <= *element_num)
    << "k must be in range [1, " << *element_num << "], got k = " << *k;
}
using namespace mshadow;
// CPU top-K: for each of the M batches, reorder the index array so that
// its first K entries are the positions of the K best values (best =
// smallest if is_ascend, largest otherwise), then gather those K values
// into `dat`.  NOTE(review): `ind` is presumably pre-filled with the
// flattened source positions and `work` with a copy of the source data
// by the caller -- confirm at the call site (TopKImpl).
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
const Tensor<cpu, 1, int>& ind,
const Tensor<cpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<cpu> *s) {
// Use full sort when K is relatively large.
const bool full_sort(K*8 > N);
// Batch size.
const int M(work.size(0)/(sizeof(DType)*N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
// Batches are independent, so they are sorted in parallel.
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < M; ++i) {
// Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
DType *vals = reinterpret_cast<DType*>(work.dptr_);
DType *sorted_vals = dat.dptr_+i*N;
int *indices = ind.dptr_+i*N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
} else {
// partial_sort only orders the first K entries -- cheaper when K << N
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
}
} else {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
}
}
// Gather only the first K values; the rest of sorted_vals is untouched.
for (int j = 0; j < K; ++j) {
sorted_vals[j] = vals[indices[j]];
}
}
}
#ifdef __CUDACC__
// True iff entry (val1, ind1) precedes entry (val2, ind2) in top-K order.
// Entries with a negative index are undefined placeholders and lose
// against any defined entry.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  if (ind2 < 0)
    return true;   // val2 undefined: val1 (defined or not) precedes it
  if (ind1 < 0)
    return false;  // val1 undefined, val2 defined: val1 never precedes
  // Both defined: order by value according to the requested direction.
  return is_ascend ? (val1 < val2) : (val1 > val2);
}
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
int i1(K-1), i2(K-1);
// Exactly K elements are discarded in total: each comparison drops the
// current tail of whichever list loses it.
for (int i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
// (i2 < 0 or i1 < 0 means the corresponding list is exhausted.)
for (int i = K; i--;) {
if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
// One thread block per batch item; each thread keeps a private sorted
// top-K list in shared memory, then the lists are pairwise-merged down
// to a single list written back by thread 0.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
// Buffer for pairwise reduction.
// Layout: blockDim.x*K ints (indices), then blockDim.x*K DTypes (values).
extern __shared__ int buff[];
// Start of buffer sections associated with this thread.
const int offset(threadIdx.x*K);
int *ind_buff = &buff[offset];
DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
// Initialize top-K values for this thread.
// (index -1 marks an undefined slot, see TopKCompare)
for (int i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (int i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
int cur_ind(ind[i]);
for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
if (j+1 < K) {
val_buff[j+1] = val_buff[j];
ind_buff[j+1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
// necessary a power of two, therefore the additional checks for last_s.
for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
last_s > 1; last_s = s, s = (s+1)/2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x+s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
}
}
// Final updates on master thread.
// The first K slots of this batch item receive the merged result.
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
ind[blockIdx.x*N+i] = ind_buff[i];
val[blockIdx.x*N+i] = val_buff[i];
}
}
}
// GPU top-K: either a full batched sort-by-key (large K) or the
// shared-memory PartialSortSmallK kernel above (K <= 5).
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
const Tensor<gpu, 1, int>& ind,
const Tensor<gpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<gpu> *s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const int M(dat.size(0)/N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
const int id_size(sizeof(int)*ind.size(0));
Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
// Re-sorting by batch id restores per-batch grouping while keeping the
// value order within each batch.
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
// Dynamic shared memory: K ints + K DTypes per thread (see kernel).
const int nthreads(mshadow::cuda::kBaseThreadNum);
PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(DType)),
mshadow::Stream<gpu>::GetStream(s)>>>
(K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
#endif
/*!
* \brief Implementation of the TopK operation
*
*
* \param ctx the running context
* \param resource temporary resource handler
* \param src the Source blob
* \param ret the destination blobs
* \param k the K elements to keep
* \param param the topk parameters
* \tparam xpu the device type.
* \tparam DType type of the output value/mask.
* \tparam IDType type of the output indices.
*/
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // All scratch tensors below alias into a single workspace allocation;
  // they are carved out by bumping `workspace_curr_ptr`.
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  Tensor<xpu, 2, DType> mask_val;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // Indices are cast to IDType at the end; refuse inputs whose positions
  // cannot be represented exactly in that type.
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += sizeof(int) * src.Size();
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  // Layout: [sorted_dat | indices | (sel_indices | mask_val when kReturnMask) | temp].
  size_t workspace_size = temp_size + sizeof(DType) * src.Size() + sizeof(int) * src.Size();
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += sizeof(int) * batch_size * k + sizeof(DType) * batch_size * k;
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                      Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += sizeof(DType) * src.Size();
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += sizeof(int) * src.Size();
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += sizeof(int) * batch_size * k;
    mask_val = Tensor<xpu, 2, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape2(batch_size * k, 1), s);
    workspace_curr_ptr += sizeof(DType) * batch_size * k;
    // The mask scatters ones at the selected positions (IndexFill below).
    mask_val = scalar<DType>(1);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
    CHECK_EQ(mask_val.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // On CPU, TopKSort sorts out of `temp_workspace` (the flattened copy of
    // the source), so build that copy here.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // On GPU the source is copied into `sorted_dat` and sorted in place;
    // `temp_workspace` is only scratch for the radix sort.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with 0, 1, 2, ... so that after the key-value sort each
  // entry records the element's position within the flattened input.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 2, DType> ret_mask =
      ret[0].get_with_shape<xpu, 2, DType>(Shape2(ret[0].Size(), 1), s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
                            inplace_reshape(indices,
                                            Shape2(batch_size,
                                                   element_num)), 0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      IndexFill(ret_mask, sel_indices, mask_val);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    // `indices` holds flattened positions; modulo element_num recovers the
    // position within each batch row.
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
                        slice<2>(inplace_reshape(indices,
                                                 Shape3(ret_indices.shape_[0],
                                                        ret_indices.shape_[2],
                                                        element_num)),
                                 0, k),
                        Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
        ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
                        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
                        element_num)));
    }
  } else {
    // kReturnValue / kReturnBoth: values in ret[0], and (for kReturnBoth)
    // indices in ret[1].
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
                        slice<2>(inplace_reshape(sorted_dat,
                                                 Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                                 0, k), Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
                        slice<2>(inplace_reshape(indices,
                                                 Shape3(ret_indices.shape_[0],
                                                        ret_indices.shape_[2],
                                                        element_num)),
                                 0, k), Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
        ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
        ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
                        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}
template<typename xpu>
// Forward entry point for the `topk` operator: dispatches on the input's
// value type and, when indices are produced, on the requested index dtype.
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
    // Index output dtype is user-selectable (param.dtype).
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
      })
    });
  } else {
    // Value/mask-only outputs: the index type is internal, int suffices.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
    });
  }
}
template<typename xpu>
// `sort` is implemented as a full top-k that returns values.
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  // k = 0 presumably means "select the whole axis" — confirm against
  // ParseTopKParam's handling of k.
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
  });
}
template<typename xpu>
// `argsort` is implemented as a full top-k that returns indices, cast to the
// user-requested index dtype.
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  // k = 0 presumably means "select the whole axis" — confirm against
  // ParseTopKParam's handling of k.
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
                                   ctx.requested[0], req, inputs[0], outputs, topk_param);
    });
  });
}
template<typename xpu, typename DType, typename IDType>
// Backward pass of top-k: scatters the incoming gradient (inputs[0]) back to
// the positions recorded by the forward pass's index output (inputs[2]).
// Only defined when the forward produced values (kReturnValue/kReturnBoth).
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  // Workspace layout: [sel_indices (batch_size*k) | batch_shift (batch_size)
  //                    | dummy_index (batch_size*k)].
  Tensor<xpu, 1, int> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k * 2 + batch_size), s);
  Tensor<xpu, 1, int> sel_indices =
    Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, int> batch_shift =
    Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 1, int> dummy_index =
    Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k + batch_size,
                        Shape1(batch_size * k), s);
  Tensor<xpu, 2, DType> out_grad =
    inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
    outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift = {0, element_num, 2*element_num, ...}: the flattened offset
  // of each batch row, added to the per-row indices below.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo,
                                           batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                            broadcast_to(inplace_reshape(batch_shift,
                                                         Shape3(src_shape[0], src_shape[2], 1)),
                                         TShape(Shape3(src_shape[0], src_shape[2], k))),
                            Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += tcast<int>(indices);
    // Map positions from the (transposed) sort layout back to the original
    // memory layout of the input.
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, IDType> indices =
      inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<int>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0]) {
    // Zero everything, then scatter out_grad into the selected positions.
    in_grad = scalar<DType>(0);
    IndexFill(in_grad, sel_indices, out_grad);
  } else if (kAddTo == req[0]) {
    // TODO(sxjscience) We can use AddTakeGrad in the future.
    // However, the current implementation of AddTakeGrad is not so efficient.
    mxnet_op::Kernel<range_fwd, xpu>::Launch(s, sel_indices.shape_.Size(), 1, 0, 1, kWriteTo,
                                             dummy_index.dptr_);
    mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad);
  } else if (kNullOp == req[0]) {
    return;
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}
template<typename xpu>
// Backward dispatcher for `topk`: only the value-producing modes have a
// gradient; index/mask-only modes are rejected.
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    // Forward emitted indices in param.dtype — dispatch on it.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
      });
    });
  } else if (param.ret_typ == topk_enum::kReturnValue) {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param);
    });
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}
// Number of output blobs the topk node owns: one for indices-only or
// mask-only modes, two (values + indices) otherwise.
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool single_output = (param.ret_typ == topk_enum::kReturnIndices) ||
                             (param.ret_typ == topk_enum::kReturnMask);
  return single_output ? 1U : 2U;
}
// Number of outputs exposed to the user: both values and indices are visible
// only in kReturnBoth mode; every other mode shows a single output.
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return (param.ret_typ == topk_enum::kReturnBoth) ? 2U : 1U;
}
// Type inference for `topk`. The second output (indices, when present) gets
// int32 for kReturnValue and the user-chosen param.dtype for kReturnBoth;
// the first output either takes param.dtype (indices-only) or is unified
// bidirectionally with the input type.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  if (out_size > 1) {
    if (param.ret_typ == topk_enum::kReturnValue) {
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
        << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
        << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  } else {
    // Unify input and output types in both directions: gather the known type
    // into data_type, then push it back into whichever side was unknown.
    CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    // Still unknown from both sides: inference has not converged yet.
    if (data_type == -1) return false;
  }
  return true;
}
// Shape inference shared by topk/sort/argsort: every output has the shape
// computed by ParseTopKParam; only the output count depends on ret_typ.
inline bool TopKShapeImpl(const TopKParam& param,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const bool single_output = (param.ret_typ == topk_enum::kReturnIndices) ||
                             (param.ret_typ == topk_enum::kReturnMask);
  CHECK_EQ(out_attrs->size(), single_output ? 1U : 2U);
  TShape& in_shape = (*in_attrs)[0];
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  if (!single_output) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}
// Shape inference entry for `topk`: forward the parsed parameter straight
// to the shared implementation.
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  return TopKShapeImpl(nnvm::get<TopKParam>(attrs.parsed), in_attrs, out_attrs);
}
// Type inference for `sort`: output 1 (indices) is always int32; output 0
// (values) is unified bidirectionally with the input type.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK_EQ(out_size, 2);
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
    << "Failed to set the type of ret_indices to int32.";
  // Gather the known type, then push it back into whichever side was unknown.
  CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  // Still unknown from both sides: inference has not converged yet.
  if (data_type == -1) return false;
  return true;
}
// Shape inference entry for `sort`: translate SortParam into an equivalent
// full-axis TopKParam (kReturnValue) and defer to the shared implementation.
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  const SortParam& sort_param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam as_topk;
  as_topk.axis = sort_param.axis;
  as_topk.is_ascend = sort_param.is_ascend;
  as_topk.k = 0;
  as_topk.ret_typ = topk_enum::kReturnValue;
  return TopKShapeImpl(as_topk, in_attrs, out_attrs);
}
// Type inference for `argsort`: the single output takes the user-requested
// index dtype (param.dtype).
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  // Bug fix: the old message claimed the type was int32, but the assigned
  // type is the configurable param.dtype.
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  return true;
}
// Shape inference entry for `argsort`: translate ArgSortParam into an
// equivalent full-axis TopKParam (kReturnIndices) and defer to the shared
// implementation.
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         std::vector<TShape> *in_attrs,
                         std::vector<TShape> *out_attrs) {
  const ArgSortParam& argsort_param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam as_topk;
  as_topk.axis = argsort_param.axis;
  as_topk.is_ascend = argsort_param.is_ascend;
  as_topk.k = 0;
  as_topk.ret_typ = topk_enum::kReturnIndices;
  return TopKShapeImpl(as_topk, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
pr62021.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#pragma omp declare simd linear(y)
/* SIMD-clone target for the vectorized loop in main(); `linear(y)` tells the
   compiler y advances by a fixed stride across lanes.  noinline keeps the
   call site intact so the simd clone is actually exercised. */
__attribute__((noinline)) int *
foo (int *x, int y)
{
  return x + y;
}
/* Shared test data: b[i] will be re-pointed into a[] by main(). */
int a[1024];
int *b[1024] = { &a[0] };
int
main ()
{
  int i;
  check_vect ();
  /* b[i] starts at &a[1023 - i]; after foo adds i, every entry must land on
     &a[1023].  Any miscompiled simd clone shows up as an abort.  */
  for (i = 0; i < 1024; i++)
    b[i] = &a[1023 - i];
#pragma omp simd
  for (i = 0; i < 1024; i++)
    b[i] = foo (b[i], i);
  for (i = 0; i < 1024; i++)
    if (b[i] != &a[1023])
      __builtin_abort ();
  return 0;
}
|
traverse_eager.h | #ifndef traverse_eager_h
#define traverse_eager_h
#include "exafmm.h"
#include "kernel.h"
namespace exafmm {
//! Recursive call to post-order tree traversal for upward pass
void upwardPass(Cell * Ci) {
  // Recurse into children first (post-order); each sufficiently large child
  // becomes an independent OpenMP task.
  for (Cell * Cj=Ci->child; Cj!=Ci->child+Ci->numChilds; Cj++) {
#pragma omp task untied if(Cj->numBodies > 100)
    upwardPass(Cj);
  }
#pragma omp taskwait
  // Allocate multipole/local expansion coefficients for this cell.
  Ci->M.resize(NTERM, 0.0);
  Ci->L.resize(NTERM, 0.0);
  // Leaves build multipoles from bodies (P2M); all cells then aggregate
  // their children's multipoles (M2M).
  if(Ci->numChilds==0) P2M(Ci);
  M2M(Ci);
}
//! Upward pass interface
void upwardPass(Cells & cells) {
  // One thread seeds the recursion at the root; the task pragmas inside
  // spread the work across the team.
#pragma omp parallel
#pragma omp single nowait
  upwardPass(&cells[0]);
}
//! Recursive call to dual tree traversal for horizontal pass
void horizontalPass(Cell * Ci, Cell * Cj) {
  vec3 dX = Ci->X - Cj->X;
  // Multipole acceptance criterion: compare the (THETA-scaled) separation
  // against the sum of the two cell radii.
  real_t R2 = norm(dX) * THETA * THETA;
  if (R2 > (Ci->R + Cj->R) * (Ci->R + Cj->R)) {
    // Well separated: approximate with a multipole-to-local translation.
    M2L(Ci, Cj);
  } else if (Ci->numChilds == 0 && Cj->numChilds == 0) {
    // Two leaves too close for approximation: direct particle interactions.
    P2P(Ci, Cj);
  } else if (Cj->numChilds == 0 || (Ci->R >= Cj->R && Ci->numChilds != 0)) {
    // Split the larger (or only splittable) cell: recurse over Ci's children.
    for (Cell * ci=Ci->child; ci!=Ci->child+Ci->numChilds; ci++) {
#pragma omp task untied if(ci->numBodies > 100)
      horizontalPass(ci, Cj);
    }
  } else {
    // Otherwise split Cj.
    for (Cell * cj=Cj->child; cj!=Cj->child+Cj->numChilds; cj++) {
      horizontalPass(Ci, cj);
    }
  }
#pragma omp taskwait
}
//! Horizontal pass interface
void horizontalPass(Cells & icells, Cells & jcells) {
  // Start the dual tree traversal from the two roots; tasks fan out inside.
#pragma omp parallel
#pragma omp single nowait
  horizontalPass(&icells[0], &jcells[0]);
}
//! Recursive call to pre-order tree traversal for downward pass
void downwardPass(Cell * Cj) {
  // Pre-order: push the local expansion down (L2L) before visiting children;
  // leaves evaluate the expansion at their bodies (L2P).
  L2L(Cj);
  if (Cj->numChilds==0) L2P(Cj);
  for (Cell * Ci=Cj->child; Ci!=Cj->child+Cj->numChilds; Ci++) {
#pragma omp task untied if(Ci->numBodies > 100)
    downwardPass(Ci);
  }
#pragma omp taskwait
}
//! Downward pass interface
void downwardPass(Cells & cells) {
  // One thread seeds the recursion at the root; tasks fan out inside.
#pragma omp parallel
#pragma omp single nowait
  downwardPass(&cells[0]);
}
//! Direct summation
void direct(Bodies & bodies, Bodies & jbodies) {
  // Wrap each body list in a single pseudo-cell so the P2P kernel can be
  // reused for the all-pairs reference computation.
  Cells cells(2);
  Cell * Ci = &cells[0];
  Cell * Cj = &cells[1];
  Ci->body = &bodies[0];
  Ci->numBodies = bodies.size();
  Cj->body = &jbodies[0];
  Cj->numBodies = jbodies.size();
  P2P(Ci, Cj);
}
}
#endif
|
GB_binop__isne_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64)
// A*D function (colscale): GB (_AxD__isne_int64)
// D*A function (rowscale): GB (_DxB__isne_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64)
// C=scalar+B GB (_bind1st__isne_int64)
// C=scalar+B' GB (_bind1st_tran__isne_int64)
// C=A+scalar GB (_bind2nd__isne_int64)
// C=A'+scalar GB (_bind2nd_tran__isne_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B with all three matrices dense; the loop body comes from the
    // shared template, specialized here via the GB_* macros for isne/int64.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B where C is dense and B is sparse; the B_ek_slicing arrays
    // describe the parallel task partition of B's entries.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b (scalar accumulate into a dense matrix).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); harmless artifact
    // of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale each column of A by the matching diagonal entry of D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: scale each row of B by the matching diagonal entry of D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd C=A+B (optionally masked): the union of patterns of A and B,
    // applying the isne operator where both are present.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult (method 08): C = A.*B where C is sparse/hypersparse,
    // optionally under a mask or complemented mask.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult (method 02): C = A.*B where A is sparse/hyper and B is
    // bitmap/full.  GB_BINOP_FLIP (0 for isne, which is commutative) decides
    // at compile time whether the flipxy path is needed.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
    // A and B are bitmap/full; work is partitioned over M's entries.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult producing a bitmap C, optionally masked or mask-complemented.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = (x != Bx [p]) for every entry present in B (Bb bitmap may
    // mark holes); x is a scalar bound as the first operand.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (Ax [p] != y) for every entry present in A (Ab bitmap may
    // mark holes); y is a scalar bound as the second operand.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A while applying (x != aij) per entry, via
    // the GB_CAST_OP macro defined just above this function.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply z = (aij != y) to each entry.
// The loop lives in the GB_unop_transpose.c template, which consumes the
// GB_CAST_OP macro defined immediately above this function.
GrB_Info GB (_bind2nd_tran__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // the scalar bound as the 2nd operand
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unwrap the bound scalar y
    int64_t y = (*((const int64_t *) y_input)) ;
    // template performs the transpose, applying GB_CAST_OP per entry
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sgeinv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeinv.c, normal z -> s, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_geinv
*
* Performs the LU inversion of a matrix A.
*
*******************************************************************************
*
* @param[in] m
* The number of rows in the matrix A. m >= 0
*
* @param[in] n
* The number of columns in the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the m-by-n matrix A to be inverted.
* On exit, the inverse of A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, U(i,i) is exactly zero; the matrix is
 *          singular, so its inverse could not be computed and
 *          has not been returned.
*
*******************************************************************************
*
* @sa plasma_cgeinv
* @sa plasma_dgeinv
* @sa plasma_sgeinv
*
******************************************************************************/
int plasma_sgeinv(int m, int n, float *pA, int lda, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (m < 0) {
        // was: "illegal value of uplo" — this check validates m
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    // lda spans the rows of the column-major array: lda >= max(1,m), not n.
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return
    if (imax(n, 0) == 0 || imax(m, 0) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geinv(plasma, PlasmaRealFloat, m, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);
    // Create tile matrix A and the n-by-nb workspace W used by getri_aux.
    plasma_desc_t A;
    plasma_desc_t W;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        n, nb, 0, 0, n, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence; release descriptors on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&W);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request; release descriptors on failure.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&W);
        plasma_desc_destroy(&A);
        return retval;
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_sgeinv(A, ipiv, W, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(A, pA, lda, &sequence, &request);
    }
    // Free matrices A and W in tile layout (W was previously leaked).
    plasma_desc_destroy(&W);
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_geinv
*
 * Computes the inverse of a general matrix A using its
 * LU factorization with partial pivoting.
*
*******************************************************************************
*
 * @param[in,out] A
 *          On entry, the general matrix A in tile layout.
 *          On exit, the inverse of A, overwriting the LU factors
 *          computed during the inversion.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[out] W
* Workspace of dimension (n, nb)
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sgeinv
* @sa plasma_omp_sgeinv
* @sa plasma_omp_cgeinv
* @sa plasma_omp_dgeinv
* @sa plasma_omp_sgeinv
*
******************************************************************************/
void plasma_omp_sgeinv(plasma_desc_t A, int *ipiv, plasma_desc_t W,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request first: plasma_request_fail()
    // dereferences both, so reporting through it with a NULL pointer
    // (as the previous code did) would itself crash.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if ((A.m == 0) || (A.n == 0)) {
        return;
    }
    // Factorize A as P*L*U.
    plasma_psgetrf(A, ipiv, sequence, request);
    // Invert the upper triangular factor U in place.
    plasma_pstrtri(PlasmaUpper, PlasmaNonUnit, A, sequence, request);
    // Combine inv(U) with L to form the inverse (uses workspace W).
    plasma_psgetri_aux(A, W, sequence, request);
    // Undo the row pivoting by swapping columns of the inverse.
    plasma_psgeswp(PlasmaColumnwise, A, ipiv, -1, sequence, request);
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from a degenerate box: width/height 0 and x/y at the far corner,
    so the scan below can only grow it.  During the scan bounds.width and
    bounds.height temporarily hold maximum column/row indexes; they are
    converted to true extents at the end.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Sample three corner pixels as "background" colors: target[0] is the
    top-left corner (left/top edges), target[1] the top-right (right edge),
    and target[2] the bottom-left (bottom edge).
  */
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Snapshot the shared bounds under the same named critical section as
      the merge below so the read is not torn by a concurrent update.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        A pixel differing from the relevant corner color extends the
        corresponding edge of this row's local bounding box.
      */
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /*
      Merge this row's local box into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        Convert the maximum column/row indexes accumulated above into
        extents relative to the box origin.
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDepth() returns the depth of the image, considering all composite
  channels; it simply forwards to GetImageChannelDepth().
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One depth accumulator per worker thread; per-thread maxima are merged
    after each scan.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        Colormapped image without alpha: the depth is determined by the
        colormap entries alone, so scan those instead of the pixels.
      */
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        if (status == MagickFalse)
          continue;
        /*
          Raise the candidate depth until the colormap entry survives a
          round-trip through that depth unchanged.
        */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            status;  /* NOTE: intentionally shadows the outer status */

          QuantumAny
            range;

          status=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            status|=image->colormap[i].red != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].red,range),range);
          if ((channel & GreenChannel) != 0)
            status|=image->colormap[i].green != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].green,range),range);
          if ((channel & BlueChannel) != 0)
            status|=image->colormap[i].blue != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].blue,range),range);
          if (status == 0)
            break;
          current_depth[id]++;
        }
      }
      /*
        Reduce the per-thread depths to the overall maximum.
      */
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      register ssize_t
        i;

      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map).
        depth_map[i] is the smallest depth at which quantum value i is
        representable exactly.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *restrict indexes;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          /*
            Track the deepest requirement seen on each requested channel.
          */
          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        /*
          Once any thread reaches the maximum possible depth, signal all
          threads to stop early; the answer cannot grow further.
        */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    General path (HDRI or quantum too large for a lookup table): per-pixel
    round-trip test, analogous to the colormap scan above.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          status;  /* NOTE: intentionally shadows the outer status */

        QuantumAny
          range;

        status=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          status|=GetPixelRed(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelRed(p),range),range);
        if ((channel & GreenChannel) != 0)
          status|=GetPixelGreen(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelGreen(p),range),range);
        if ((channel & BlueChannel) != 0)
          status|=GetPixelBlue(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelBlue(p),range),range);
        if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
          status|=GetPixelOpacity(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelOpacity(p),range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          status|=GetPixelIndex(indexes+x) !=
            ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex(indexes+
            x),range),range);
        if (status == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
/*
  Return the smaller of two doubles (y when x is not strictly less than y,
  matching the original's tie/NaN behavior).
*/
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the nearest legal quantum depth
    (8, 16, 32, or 64); depths above 64 are returned unchanged.
    When constrain is set, clamp to MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register size_t
    i;

  size_t
    depth;

  depth=image->depth;
  for (i=0; i < (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image; the cheap colorspace test runs first, then the
    pixel scans from most to least restrictive (bilevel, gray, palette).
    The matte flag selects the ...Matte variant of each class.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a previously classified gray/bilevel type without rescanning.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  /*
    Only gray and RGB colorspaces can hold gray pixels.
  */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IsRGBColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /*
    Assume bilevel; demote to grayscale on the first gray-but-not-
    monochrome pixel, or abandon the scan on the first non-gray pixel.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Cache the detected type on the (logically const) image.
  */
  ((Image *) image)->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Scan the pixels; the image is monochrome when every pixel is either
    pure black or pure white.  A positive result is cached in image->type.
  */
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IsRGBColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (bilevel != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (bilevel == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Determine if image is opaque: no pixel may carry an opacity value
    other than OpaqueOpacity.  A pixel-read failure also reports
    non-opaque, matching the original early-exit behavior.
  */
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A matte-less image is opaque by definition.
  */
  if (image->matte == MagickFalse)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (opaque != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
/*
  SetImageDepth() sets the depth of all composite channels; it simply
  forwards to SetImageChannelDepth().
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}
/*
  SetImageChannelDepth() constrains the effective depth of the selected
  channels to 'depth' bits by requantizing every sample: each value is
  scaled down to the reduced range and back up to the full quantum range,
  discarding the low-order bits.  On success image->depth is updated to
  the requested depth and MagickTrue is returned; MagickFalse indicates a
  pixel-cache failure on at least one row.
*/
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /*
        Requested depth meets or exceeds the build's quantum depth; the
        samples already carry full precision, so just record the depth.
      */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Requantize the colormap entries.  NOTE(review): 'status' is named
        in the shared() clause here but is not assigned until after this
        loop; the loop itself never reads or writes it, so this is
        harmless, but the clause appears vestigial.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].red,range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].green,range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].blue,range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].opacity,range),range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).  With integer
        quanta no larger than MaxMap, every possible sample value can be
        requantized once into a lookup table, turning the per-pixel work
        into a single indexed load.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        /* Once any row has failed, skip the remaining rows cheaply. */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      /*
        Fast path is complete: release the view and the lookup table,
        record the new depth on success, and return without falling
        through to the generic loop below.
      */
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.  Generic path (HDRI builds, or quanta
    wider than MaxMap): requantize each sample individually.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(q),
          range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(q),
          range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(q),
          range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelOpacity(q),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
%      BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%      PaletteBilevelMatteType, PaletteMatteType, TrueColorType,
%      TrueColorMatteType, ColorSeparationType, ColorSeparationMatteType,
%      OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
/*
  SetImageType() converts the image to the requested ImageType, applying
  whatever colorspace transforms, quantization, storage-class changes, and
  alpha-channel adjustments the target type requires.  The image's "dither"
  artifact, when present, is propagated to the quantizer options.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *dither_artifact;

  ImageInfo
    *options;

  MagickBooleanType
    status;

  QuantizeInfo
    *qinfo;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  /*
    Seed quantizer options from the image, honoring any per-image
    "dither" artifact over the image's own dither flag.
  */
  options=AcquireImageInfo();
  options->dither=image->dither;
  dither_artifact=GetImageArtifact(image,"dither");
  if (dither_artifact != (const char *) NULL)
    (void) SetImageOption(options,"dither",dither_artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray colorspace, then reduce to a 2-color (monochrome) palette. */
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (IsMonochromeImage(image,&image->exception) == MagickFalse)
        {
          qinfo=AcquireQuantizeInfo(options);
          qinfo->number_colors=2;
          qinfo->colorspace=GRAYColorspace;
          status=QuantizeImage(qinfo,image);
          qinfo=DestroyQuantizeInfo(qinfo);
        }
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      /* As GrayscaleType, but guarantee an alpha channel exists. */
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      /* Quantize to at most 256 colors in an sRGB-compatible space. */
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          qinfo=AcquireQuantizeInfo(options);
          qinfo->number_colors=256;
          status=QuantizeImage(qinfo,image);
          qinfo=DestroyQuantizeInfo(qinfo);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* Palette image whose alpha channel is thresholded to on/off. */
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      qinfo=AcquireQuantizeInfo(options);
      status=QuantizeImage(qinfo,image);
      qinfo=DestroyQuantizeInfo(qinfo);
      break;
    }
    case PaletteMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      qinfo=AcquireQuantizeInfo(options);
      qinfo->colorspace=TransparentColorspace;
      status=QuantizeImage(qinfo,image);
      qinfo=DestroyQuantizeInfo(qinfo);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      /* CMYK conversion goes through sRGB when needed. */
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  /* Record the requested type and release the working options. */
  image->type=type;
  options=DestroyImageInfo(options);
  return(status);
}
|
struct_scale.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Structured scale routine
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* hypre_StructScale
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_StructScale
 *
 * Scale the structured vector y in place: y <- alpha * y, applied over
 * every box of the vector's grid.  Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructScale( HYPRE_Complex alpha,
                   hypre_StructVector *y )
{
   hypre_Box       *data_box;
   HYPRE_Int        di;
   HYPRE_Complex   *values;

   hypre_BoxArray  *grid_boxes;
   hypre_Box       *grid_box;

   hypre_Index      extents;
   hypre_IndexRef   lower;
   hypre_Index      one_stride;

   HYPRE_Int        b;

   hypre_SetIndex(one_stride, 1);

   grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
   hypre_ForBoxI(b, grid_boxes)
   {
      /* Box extents come from the grid box; data indexing uses the
         (possibly larger, ghost-padded) data-space box. */
      grid_box = hypre_BoxArrayBox(grid_boxes, b);
      lower    = hypre_BoxIMin(grid_box);
      data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), b);
      values   = hypre_StructVectorBoxData(y, b);

      hypre_BoxGetSize(grid_box, extents);

      hypre_BoxLoop1Begin(hypre_StructVectorNDim(y), extents,
                          data_box, lower, one_stride, di);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,di) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(di)
      {
         values[di] *= alpha;
      }
      hypre_BoxLoop1End(di);
   }

   return hypre_error_flag;
}
|
IonisationBox.c | // Re-write of find_HII_bubbles.c for being accessible within the MCMC
// One-shot initialization guards: each starts at 1 and is cleared after the
// corresponding setup runs inside ComputeIonizedBox() (the erfc table build
// and init_MHR() respectively), so the work happens only on the first call.
int INIT_ERFC_INTERPOLATION = 1;
int INIT_RECOMBINATIONS = 1;
// erfc() lookup table plus forward differences used for linear interpolation
// between table entries (built only for the mass-independent zeta model).
double *ERFC_VALS, *ERFC_VALS_DIFF;
// Redshift offset produced by adjust_redshifts_for_photoncons() when the
// PHOTON_CONS (photon non-conservation) correction is enabled.
float absolute_delta_z;
// Bin layout for the collapsed-fraction interpolation tables over cell
// overdensity at the current snapshot.  The "large" values are assigned in
// ComputeIonizedBox() (span CRIT_DENS_TRANSITION*0.999 up to Deltac, split
// into NSFR_high bins); the "small" values are presumably set by the
// low-density table setup elsewhere in this file -- TODO confirm.
float overdense_small_min, overdense_small_bin_width, overdense_small_bin_width_inv;
float overdense_large_min, overdense_large_bin_width, overdense_large_bin_width_inv;
// Same bin layout, but for the previous snapshot's tables.
float prev_overdense_small_min, prev_overdense_small_bin_width, prev_overdense_small_bin_width_inv;
float prev_overdense_large_min, prev_overdense_large_bin_width, prev_overdense_large_bin_width_inv;
// Bin layout over log10 of the turnover mass; the _MINI set appears to be
// for minihalo (MINI) sources -- not assigned in the visible code, so the
// exact producer should be verified against the rest of this file.
float log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, log10Mturn_bin_width_inv;
float log10Mturn_min_MINI, log10Mturn_max_MINI, log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI;
// Forward declarations for helpers defined elsewhere in this file.
int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI);
void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density);
int ComputeIonizedBox(float redshift, float prev_redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct AstroParams *astro_params, struct FlagOptions *flag_options,
struct PerturbedField *perturbed_field,
struct PerturbedField *previous_perturbed_field,
struct IonizedBox *previous_ionize_box,
struct TsBox *spin_temp,
struct PerturbHaloField *halos,
struct IonizedBox *box) {
int status;
Try{ // This Try brackets the whole function, so we don't indent.
LOG_DEBUG("input values:");
LOG_DEBUG("redshift=%f, prev_redshift=%f", redshift, prev_redshift);
#if LOG_LEVEL >= DEBUG_LEVEL
writeUserParams(user_params);
writeCosmoParams(cosmo_params);
writeAstroParams(flag_options, astro_params);
writeFlagOptions(flag_options);
#endif
// Makes the parameter structs visible to a variety of functions/macros
// Do each time to avoid Python garbage collection issues
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
omp_set_num_threads(user_params->N_THREADS);
// Other parameters used in the code
int i,j,k,x,y,z, LAST_FILTER_STEP, first_step_R, short_completely_ionised,i_halo;
int counter, N_halos_in_cell;
unsigned long long ct;
float growth_factor, pixel_mass, cell_length_factor, M_MIN, prev_growth_factor;
float erfc_denom, erfc_denom_cell, res_xH, Splined_Fcoll, xHII_from_xrays, curr_dens, massofscaleR, ION_EFF_FACTOR, growth_factor_dz;
float Splined_Fcoll_MINI, prev_dens, ION_EFF_FACTOR_MINI, prev_Splined_Fcoll, prev_Splined_Fcoll_MINI;
float ave_M_coll_cell, ave_N_min_cell, pixel_volume, density_over_mean;
double global_xH, ST_over_PS, f_coll, R, stored_R, f_coll_min;
double ST_over_PS_MINI, f_coll_MINI, f_coll_min_MINI;
double t_ast, Gamma_R_prefactor, rec, dNrec, sigmaMmax;
double Gamma_R_prefactor_MINI;
float fabs_dtdz, ZSTEP, z_eff;
const float dz = 0.01;
float dens_val, prev_dens_val;
int overdense_int,status_int;
int something_finite_or_infinite = 0;
int log10_Mturnover_MINI_int, log10_Mturnover_int;
int *overdense_int_boundexceeded_threaded = calloc(user_params->N_THREADS,sizeof(int));
if(user_params->USE_INTERPOLATION_TABLES) {
overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999;
overdense_large_bin_width = 1./((double)NSFR_high-1.)*(Deltac-overdense_large_min);
overdense_large_bin_width_inv = 1./overdense_large_bin_width;
prev_overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999;
prev_overdense_large_bin_width = 1./((double)NSFR_high-1.)*(Deltac-prev_overdense_large_min);
prev_overdense_large_bin_width_inv = 1./prev_overdense_large_bin_width;
}
double ave_log10_Mturnover, ave_log10_Mturnover_MINI;
float Mlim_Fstar, Mlim_Fesc;
float Mlim_Fstar_MINI, Mlim_Fesc_MINI;
float Mcrit_atom, log10_Mcrit_atom, log10_Mcrit_mol;
fftwf_complex *log10_Mturnover_unfiltered=NULL, *log10_Mturnover_filtered=NULL;
fftwf_complex *log10_Mturnover_MINI_unfiltered=NULL, *log10_Mturnover_MINI_filtered=NULL;
float log10_Mturnover, log10_Mturnover_MINI, Mcrit_LW, Mcrit_RE, Mturnover, Mturnover_MINI;
float min_density, max_density;
float prev_min_density, prev_max_density;
float stored_redshift, adjustment_factor;
gsl_rng * r[user_params->N_THREADS];
LOG_SUPER_DEBUG("initing heat");
init_heat();
float TK;
TK = T_RECFAST(redshift,0);
LOG_SUPER_DEBUG("inited heat");
init_ps();
LOG_SUPER_DEBUG("defined parameters");
pixel_volume = pow(user_params->BOX_LEN/((float)(user_params->HII_DIM)), 3);
if(flag_options->USE_MASS_DEPENDENT_ZETA) {
ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10;
ION_EFF_FACTOR_MINI = global_params.Pop3_ion * astro_params->F_STAR7_MINI * astro_params->F_ESC7_MINI;
}
else {
ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR;
ION_EFF_FACTOR_MINI = 0.;
}
// For recombinations
if(flag_options->INHOMO_RECO) {
if(INIT_RECOMBINATIONS) {
init_MHR();
INIT_RECOMBINATIONS=0;
}
if (prev_redshift < 1) //deal with first redshift
ZSTEP = (1. + redshift) * (global_params.ZPRIME_STEP_FACTOR - 1.);
else
ZSTEP = prev_redshift - redshift;
#pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) {
box->Gamma12_box[ct] = 0.0;
box->MFP_box[ct] = 0.0;
}
}
}
else {
ZSTEP = 0.2;
}
#pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) {
box->z_re_box[ct] = -1.0;
}
}
fabs_dtdz = fabs(dtdz(redshift))/1e15; //reduce to have good precision
t_ast = astro_params->t_STAR * t_hubble(redshift);
growth_factor_dz = dicke(redshift-dz);
// Modify the current sampled redshift to a redshift which matches the expected filling factor given our astrophysical parameterisation.
// This is the photon non-conservation correction
if(flag_options->PHOTON_CONS) {
adjust_redshifts_for_photoncons(astro_params,flag_options,&redshift,&stored_redshift,&absolute_delta_z);
LOG_DEBUG("PhotonCons data:");
LOG_DEBUG("original redshift=%f, updated redshift=%f delta-z = %f", stored_redshift, redshift, absolute_delta_z);
if(isfinite(redshift)==0 || isfinite(absolute_delta_z)==0) {
LOG_ERROR("Updated photon non-conservation redshift is either infinite or NaN!");
Throw(ParameterError);
}
}
Splined_Fcoll = 0.;
Splined_Fcoll_MINI = 0.;
double ArgBinWidth, InvArgBinWidth, erfc_arg_val, erfc_arg_min, erfc_arg_max;
int erfc_arg_val_index, ERFC_NUM_POINTS;
erfc_arg_val = 0.;
erfc_arg_val_index = 0;
// Setup an interpolation table for the error function, helpful for calcluating the collapsed fraction
// (only for the default model, i.e. mass-independent ionising efficiency)
erfc_arg_min = -15.0;
erfc_arg_max = 15.0;
ERFC_NUM_POINTS = 10000;
ArgBinWidth = (erfc_arg_max - erfc_arg_min)/((double)ERFC_NUM_POINTS - 1.);
InvArgBinWidth = 1./ArgBinWidth;
if(!flag_options->USE_MASS_DEPENDENT_ZETA && INIT_ERFC_INTERPOLATION) {
ERFC_VALS = calloc(ERFC_NUM_POINTS,sizeof(double));
ERFC_VALS_DIFF = calloc(ERFC_NUM_POINTS,sizeof(double));
#pragma omp parallel shared(ERFC_VALS,erfc_arg_min,ArgBinWidth) private(i,erfc_arg_val) num_threads(user_params->N_THREADS)
{
#pragma omp for
for(i=0;i<ERFC_NUM_POINTS;i++) {
erfc_arg_val = erfc_arg_min + ArgBinWidth*(double)i;
ERFC_VALS[i] = splined_erfc(erfc_arg_val);
}
}
#pragma omp parallel shared(ERFC_VALS_DIFF,ERFC_VALS) private(i) num_threads(user_params->N_THREADS)
{
#pragma omp for
for(i=0;i<(ERFC_NUM_POINTS-1);i++) {
ERFC_VALS_DIFF[i] = ERFC_VALS[i+1] - ERFC_VALS[i];
}
}
INIT_ERFC_INTERPOLATION = 0;
}
LOG_SUPER_DEBUG("erfc interpolation done");
///////////////////////////////// BEGIN INITIALIZATION //////////////////////////////////
// perform a very rudimentary check to see if we are underresolved and not using the linear approx
if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){
LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n It Is recommended \
that you either increase the resolution (DIM/Box_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n");
}
// initialize power spectrum
growth_factor = dicke(redshift);
prev_growth_factor = dicke(prev_redshift);
fftwf_complex *deltax_unfiltered, *deltax_unfiltered_original, *deltax_filtered;
fftwf_complex *xe_unfiltered, *xe_filtered, *N_rec_unfiltered, *N_rec_filtered;
fftwf_complex *prev_deltax_unfiltered, *prev_deltax_filtered;
fftwf_complex *M_coll_unfiltered,*M_coll_filtered;
deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
deltax_unfiltered_original = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
if (flag_options->USE_MINI_HALOS){
prev_deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
prev_deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
if(flag_options->USE_TS_FLUCT) {
xe_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
xe_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
if (flag_options->INHOMO_RECO){
N_rec_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); // cumulative number of recombinations
N_rec_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
if(flag_options->USE_MASS_DEPENDENT_ZETA) {
xi_SFR = calloc(NGL_SFR+1,sizeof(float));
wi_SFR = calloc(NGL_SFR+1,sizeof(float));
if(user_params->USE_INTERPOLATION_TABLES) {
log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double));
Overdense_spline_SFR = calloc(NSFR_high,sizeof(float));
log10_Nion_spline = calloc(NSFR_low,sizeof(float));
Nion_spline = calloc(NSFR_high,sizeof(float));
if (flag_options->USE_MINI_HALOS){
prev_log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double));
prev_Overdense_spline_SFR = calloc(NSFR_high,sizeof(float));
log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float));
Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float));
log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float));
Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float));
prev_log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float));
prev_Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float));
prev_log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float));
prev_Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float));
}
}
if (flag_options->USE_MINI_HALOS){
Mturns = calloc(NMTURN,sizeof(float));
Mturns_MINI = calloc(NMTURN,sizeof(float));
}
}
// Calculate the density field for this redshift if the initial conditions/cosmology are changing
if(flag_options->PHOTON_CONS) {
adjustment_factor = dicke(redshift)/dicke(stored_redshift);
}
else {
adjustment_factor = 1.;
}
#pragma omp parallel shared(deltax_unfiltered,perturbed_field,adjustment_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = (perturbed_field->density[HII_R_INDEX(i,j,k)])*adjustment_factor;
}
}
}
}
LOG_SUPER_DEBUG("density field calculated");
// keep the unfiltered density field in an array, to save it for later
memcpy(deltax_unfiltered_original, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
i=0;
// Newer setup to be performed in parallel
int thread_num;
for(thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){
// Original defaults with gsl_rng_mt19937 and SEED = 0, thus start with this and iterate for all other threads by their thread number
r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937);
gsl_rng_set(r[thread_num], thread_num);
}
pixel_mass = RtoM(L_FACTOR*user_params->BOX_LEN/(float)(user_params->HII_DIM));
cell_length_factor = L_FACTOR;
if(flag_options->USE_HALO_FIELD && (global_params.FIND_BUBBLE_ALGORITHM == 2) && ((user_params->BOX_LEN/(float)(user_params->HII_DIM) < 1))) {
cell_length_factor = 1.;
}
if (prev_redshift < 1){
LOG_DEBUG("first redshift, do some initialization");
previous_ionize_box->z_re_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float));
#pragma omp parallel shared(previous_ionize_box) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
previous_ionize_box->z_re_box[HII_R_INDEX(i, j, k)] = -1.0;
}
}
}
}
if (flag_options->INHOMO_RECO)
previous_ionize_box->dNrec_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float));
}
//set the minimum source mass
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
if (flag_options->USE_MINI_HALOS){
ave_log10_Mturnover = 0.;
ave_log10_Mturnover_MINI = 0.;
// this is the first z, and the previous_ionize_box are empty
if (prev_redshift < 1){
previous_ionize_box->Gamma12_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float));
// really painful to get the length...
counter = 1;
R=fmax(global_params.R_BUBBLE_MIN, (cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM));
while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) <= FRACT_FLOAT_ERR ){
if(R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) {
stored_R = R/(global_params.DELTA_R_HII_FACTOR);
}
R*= global_params.DELTA_R_HII_FACTOR;
counter += 1;
}
previous_ionize_box->Fcoll = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float));
previous_ionize_box->Fcoll_MINI = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float));
previous_ionize_box->mean_f_coll = 0.0;
previous_ionize_box->mean_f_coll_MINI = 0.0;
#pragma omp parallel shared(prev_deltax_unfiltered) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = -1.5;
}
}
}
}
}
else{
#pragma omp parallel shared(prev_deltax_unfiltered,previous_perturbed_field) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = previous_perturbed_field->density[HII_R_INDEX(i,j,k)];
}
}
}
}
}
LOG_SUPER_DEBUG("previous density field calculated");
// fields added for minihalos
Mcrit_atom = atomic_cooling_threshold(redshift);
log10_Mcrit_atom = log10(Mcrit_atom);
log10_Mcrit_mol = log10(lyman_werner_threshold(redshift, 0.));
log10_Mturnover_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
log10_Mturnover_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
log10_Mturnover_MINI_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
log10_Mturnover_MINI_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
if (!log10_Mturnover_unfiltered || !log10_Mturnover_filtered || !log10_Mturnover_MINI_unfiltered || !log10_Mturnover_MINI_filtered){// || !Mcrit_RE_grid || !Mcrit_LW_grid)
LOG_ERROR("Error allocating memory for Mturnover or Mturnover_MINI boxes");
Throw(MemoryAllocError);
}
LOG_SUPER_DEBUG("Calculating and outputting Mcrit boxes for atomic and molecular halos...");
#pragma omp parallel shared(redshift,previous_ionize_box,spin_temp,Mcrit_atom,log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered)\
private(x,y,z,Mcrit_RE,Mcrit_LW,Mturnover,Mturnover_MINI,log10_Mturnover,log10_Mturnover_MINI) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:ave_log10_Mturnover,ave_log10_Mturnover_MINI)
for (x=0; x<user_params->HII_DIM; x++){
for (y=0; y<user_params->HII_DIM; y++){
for (z=0; z<user_params->HII_DIM; z++){
Mcrit_RE = reionization_feedback(redshift, previous_ionize_box->Gamma12_box[HII_R_INDEX(x, y, z)], previous_ionize_box->z_re_box[HII_R_INDEX(x, y, z)]);
Mcrit_LW = lyman_werner_threshold(redshift, spin_temp->J_21_LW_box[HII_R_INDEX(x, y, z)]);
//*((float *)Mcrit_RE_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_RE;
//*((float *)Mcrit_LW_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_LW;
Mturnover = Mcrit_RE > Mcrit_atom ? Mcrit_RE : Mcrit_atom;
Mturnover_MINI = Mcrit_RE > Mcrit_LW ? Mcrit_RE : Mcrit_LW;
log10_Mturnover = log10(Mturnover);
log10_Mturnover_MINI = log10(Mturnover_MINI);
*((float *)log10_Mturnover_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover;
*((float *)log10_Mturnover_MINI_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover_MINI;
ave_log10_Mturnover += log10_Mturnover;
ave_log10_Mturnover_MINI += log10_Mturnover_MINI;
}
}
}
}
box->log10_Mturnover_ave = ave_log10_Mturnover/(double) HII_TOT_NUM_PIXELS;
box->log10_Mturnover_MINI_ave = ave_log10_Mturnover_MINI/(double) HII_TOT_NUM_PIXELS;
Mturnover = pow(10., box->log10_Mturnover_ave);
Mturnover_MINI = pow(10., box->log10_Mturnover_MINI_ave);
M_MIN = global_params.M_MIN_INTEGRAL;
Mlim_Fstar_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR, astro_params->F_STAR7_MINI * pow(1e3,astro_params->ALPHA_STAR));
Mlim_Fesc_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_ESC, astro_params->F_ESC7_MINI * pow(1e3, astro_params->ALPHA_ESC));
LOG_SUPER_DEBUG("average turnover masses are %.2f and %.2f for ACGs and MCGs", box->log10_Mturnover_ave, box->log10_Mturnover_MINI_ave);
}
else{
M_MIN = astro_params->M_TURN/50.;
Mturnover = astro_params->M_TURN;
box->log10_Mturnover_ave = log10(Mturnover);
box->log10_Mturnover_MINI_ave = log10(Mturnover);
}
Mlim_Fstar = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR, astro_params->F_STAR10);
Mlim_Fesc = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_ESC, astro_params->F_ESC10);
}
else {
//set the minimum source mass
if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM
M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 1.22);
}
else { // ionized IGM
M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 0.6);
}
}
LOG_SUPER_DEBUG("minimum source mass has been set: %f", M_MIN);
if(user_params->USE_INTERPOLATION_TABLES) {
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20);
}
else{
if(!flag_options->USE_TS_FLUCT) {
initialiseSigmaMInterpTable(M_MIN,1e20);
}
else if(flag_options->USE_MINI_HALOS){
initialiseSigmaMInterpTable(global_params.M_MIN_INTEGRAL/50.,1e20);
}
}
}
LOG_SUPER_DEBUG("sigma table has been initialised");
// check for WDM
if (global_params.P_CUTOFF && ( M_MIN < M_J_WDM())){
LOG_WARNING("The default Jeans mass of %e Msun is smaller than the scale supressed by the effective pressure of WDM.", M_MIN);
M_MIN = M_J_WDM();
LOG_WARNING("Setting a new effective Jeans mass from WDM pressure supression of %e Msun", M_MIN);
}
// ARE WE USING A DISCRETE HALO FIELD (identified in the ICs with FindHaloes.c and evolved with PerturbHaloField.c)
if(flag_options->USE_HALO_FIELD) {
M_coll_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
M_coll_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
#pragma omp parallel shared(M_coll_unfiltered) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (ct=0; ct<HII_TOT_FFT_NUM_PIXELS; ct++){
*((float *)M_coll_unfiltered + ct) = 0;
}
}
#pragma omp parallel shared(M_coll_unfiltered,halos) \
private(i_halo,x,y,z) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i_halo=0; i_halo<halos->n_halos; i_halo++){
x = halos->halo_coords[0+3*i_halo];
y = halos->halo_coords[1+3*i_halo];
z = halos->halo_coords[2+3*i_halo];
#pragma omp atomic
*((float *)M_coll_unfiltered + HII_R_FFT_INDEX(x, y, z)) += halos->halo_masses[i_halo];
}
}
} // end of the USE_HALO_FIELD option
// lets check if we are going to bother with computing the inhmogeneous field at all...
global_xH = 0.0;
// Determine the normalisation for the excursion set algorithm
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
if (flag_options->USE_MINI_HALOS){
if (previous_ionize_box->mean_f_coll * ION_EFF_FACTOR < 1e-4){
box->mean_f_coll = Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc);
}
else{
box->mean_f_coll = previous_ionize_box->mean_f_coll + \
Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc) - \
Nion_General(prev_redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc);
}
if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI < 1e-4){
box->mean_f_coll_MINI = Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom,
astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,
astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI);
}
else{
box->mean_f_coll_MINI = previous_ionize_box->mean_f_coll_MINI + \
Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI
,Mlim_Fstar_MINI,Mlim_Fesc_MINI) - \
Nion_General_MINI(prev_redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI,
Mlim_Fstar_MINI,Mlim_Fesc_MINI);
}
f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc);
f_coll_min_MINI = Nion_General_MINI(global_params.Z_HEAT_MAX,M_MIN,Mturnover_MINI,Mcrit_atom,
astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,
astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI);
}
else{
box->mean_f_coll = Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc);
box->mean_f_coll_MINI = 0.;
f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc);
}
}
else {
box->mean_f_coll = FgtrM_General(redshift, M_MIN);
}
if(isfinite(box->mean_f_coll)==0) {
LOG_ERROR("Mean collapse fraction is either infinite or NaN!");
Throw(ParameterError);
}
LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll: %e", box->mean_f_coll);
if (flag_options->USE_MINI_HALOS){
if(isfinite(box->mean_f_coll_MINI)==0) {
LOG_ERROR("Mean collapse fraction of MINI is either infinite or NaN!");
Throw(ParameterError);
}
LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll_MINI: %e", box->mean_f_coll_MINI);
}
if (box->mean_f_coll * ION_EFF_FACTOR + box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI< global_params.HII_ROUND_ERR){ // way too small to ionize anything...
// printf( "The mean collapse fraction is %e, which is much smaller than the effective critical collapse fraction of %e\n I will just declare everything to be neutral\n", mean_f_coll, f_coll_crit);
// find the neutral fraction
if(flag_options->USE_TS_FLUCT) {
#pragma omp parallel shared(box,spin_temp) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:global_xH)
for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){
box->xH_box[ct] = 1.-spin_temp->x_e_box[ct]; // convert from x_e to xH
global_xH += box->xH_box[ct];
box->temp_kinetic_all_gas[ct] = spin_temp->Tk_box[ct];
}
}
global_xH /= (double)HII_TOT_NUM_PIXELS;
}
else {
global_xH = 1. - xion_RECFAST(redshift, 0);
#pragma omp parallel shared(box,global_xH,TK) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){
box->xH_box[ct] = global_xH;
box->temp_kinetic_all_gas[ct] = TK;
}
}
}
}
else {
// Take the ionisation fraction from the X-ray ionisations from Ts.c (only if the calculate spin temperature flag is set)
if (flag_options->USE_TS_FLUCT) {
#pragma omp parallel shared(xe_unfiltered, spin_temp) private(i, j, k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i = 0; i < user_params->HII_DIM; i++) {
for (j = 0; j < user_params->HII_DIM; j++) {
for (k = 0; k < user_params->HII_DIM; k++) {
*((float *) xe_unfiltered + HII_R_FFT_INDEX(i, j, k)) = spin_temp->x_e_box[HII_R_INDEX(i, j, k)];
}
}
}
}
}
LOG_SUPER_DEBUG("calculated ionization fraction");
if (flag_options->INHOMO_RECO) {
#pragma omp parallel shared(N_rec_unfiltered, previous_ionize_box) private(i, j, k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i = 0; i < user_params->HII_DIM; i++) {
for (j = 0; j < user_params->HII_DIM; j++) {
for (k = 0; k < user_params->HII_DIM; k++) {
*((float *) N_rec_unfiltered +
HII_R_FFT_INDEX(i, j, k)) = previous_ionize_box->dNrec_box[HII_R_INDEX(i, j, k)];
}
}
}
}
}
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, deltax_unfiltered);
LOG_SUPER_DEBUG("FFTs performed");
if(flag_options->USE_MINI_HALOS){
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_unfiltered);
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_unfiltered);
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_unfiltered);
LOG_SUPER_DEBUG("MINI HALO ffts performed");
}
if (flag_options->USE_HALO_FIELD){
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_unfiltered);
LOG_SUPER_DEBUG("HALO_FIELD ffts performed");
}
if(flag_options->USE_TS_FLUCT) {
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_unfiltered);
LOG_SUPER_DEBUG("Ts ffts performed");
}
if (flag_options->INHOMO_RECO) {
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_unfiltered);
}
// remember to add the factor of VOLUME/TOT_NUM_PIXELS when converting from
// real space to k-space
// Note: we will leave off factor of VOLUME, in anticipation of the inverse FFT below
#pragma omp parallel shared(deltax_unfiltered,xe_unfiltered,N_rec_unfiltered,prev_deltax_unfiltered,\
log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered,M_coll_unfiltered) \
private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (ct=0; ct<HII_KSPACE_NUM_PIXELS; ct++){
deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0);
if(flag_options->USE_TS_FLUCT) { xe_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; }
if (flag_options->INHOMO_RECO){ N_rec_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; }
if(flag_options->USE_HALO_FIELD) { M_coll_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; }
if(flag_options->USE_MINI_HALOS){
prev_deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0);
log10_Mturnover_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0);
log10_Mturnover_MINI_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0);
}
}
}
LOG_SUPER_DEBUG("deltax unfiltered calculated");
// ************************************************************************************* //
// ***************** LOOP THROUGH THE FILTER RADII (in Mpc) *************************** //
// ************************************************************************************* //
// set the max radius we will use, making sure we are always sampling the same values of radius
// (this avoids aliasing differences w redshift)
short_completely_ionised = 0;
// loop through the filter radii (in Mpc)
erfc_denom_cell = 1; //dummy value
R=fmax(global_params.R_BUBBLE_MIN, (cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM));
while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) <= FRACT_FLOAT_ERR) {
R *= global_params.DELTA_R_HII_FACTOR;
if (R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) {
stored_R = R / (global_params.DELTA_R_HII_FACTOR);
}
}
LOG_DEBUG("set max radius: %f", R);
R=fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN);
LAST_FILTER_STEP = 0;
first_step_R = 1;
double R_temp = (double) (astro_params->R_BUBBLE_MAX);
counter = 0;
while (!LAST_FILTER_STEP && (M_MIN < RtoM(R)) ){
LOG_ULTRA_DEBUG("while loop for until RtoM(R)=%f reaches M_MIN=%f", RtoM(R), M_MIN);
// Check if we are the last filter step
if ( ((R/(global_params.DELTA_R_HII_FACTOR) - cell_length_factor*(user_params->BOX_LEN)/(float)(user_params->HII_DIM)) <= FRACT_FLOAT_ERR) || \
((R/(global_params.DELTA_R_HII_FACTOR) - global_params.R_BUBBLE_MIN) <= FRACT_FLOAT_ERR) ) {
LAST_FILTER_STEP = 1;
R = fmax(cell_length_factor*user_params->BOX_LEN/(double)(user_params->HII_DIM), global_params.R_BUBBLE_MIN);
}
// Copy all relevant quantities from memory into new arrays to be smoothed and FFT'd.
if (flag_options->USE_TS_FLUCT) {
memcpy(xe_filtered, xe_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS);
}
if (flag_options->INHOMO_RECO) {
memcpy(N_rec_filtered, N_rec_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS);
}
if (flag_options->USE_HALO_FIELD) {
memcpy(M_coll_filtered, M_coll_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS);
}
memcpy(deltax_filtered, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
if(flag_options->USE_MINI_HALOS){
memcpy(prev_deltax_filtered, prev_deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
memcpy(log10_Mturnover_MINI_filtered, log10_Mturnover_MINI_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
memcpy(log10_Mturnover_filtered, log10_Mturnover_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
if (!LAST_FILTER_STEP ||
((R - cell_length_factor * (user_params->BOX_LEN / (double) (user_params->HII_DIM))) >
FRACT_FLOAT_ERR)) {
if (flag_options->USE_TS_FLUCT) {
filter_box(xe_filtered, 1, global_params.HII_FILTER, R);
}
if (flag_options->INHOMO_RECO) {
filter_box(N_rec_filtered, 1, global_params.HII_FILTER, R);
}
if (flag_options->USE_HALO_FIELD) {
filter_box(M_coll_filtered, 1, global_params.HII_FILTER, R);
}
filter_box(deltax_filtered, 1, global_params.HII_FILTER, R);
if(flag_options->USE_MINI_HALOS){
filter_box(prev_deltax_filtered, 1, global_params.HII_FILTER, R);
filter_box(log10_Mturnover_MINI_filtered, 1, global_params.HII_FILTER, R);
filter_box(log10_Mturnover_filtered, 1, global_params.HII_FILTER, R);
}
}
// Perform FFTs
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, deltax_filtered);
if(flag_options->USE_MINI_HALOS){
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_filtered);
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_filtered);
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_filtered);
}
if (flag_options->USE_HALO_FIELD) {
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_filtered);
}
if (flag_options->USE_TS_FLUCT) {
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_filtered);
}
if (flag_options->INHOMO_RECO) {
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_filtered);
}
// Check if this is the last filtering scale. If so, we don't need deltax_unfiltered anymore.
// We will re-read it to get the real-space field, which we will use to set the residual neutral fraction
ST_over_PS = 0;
ST_over_PS_MINI = 0;
f_coll = 0;
f_coll_MINI = 0;
massofscaleR = RtoM(R);
if(!user_params->USE_INTERPOLATION_TABLES) {
sigmaMmax = sigma_z0(massofscaleR);
}
if (!flag_options->USE_HALO_FIELD) {
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
min_density = max_density = 0.0;
#pragma omp parallel shared(deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(max:max_density) reduction(min:min_density)
for (x = 0; x < user_params->HII_DIM; x++) {
for (y = 0; y < user_params->HII_DIM; y++) {
for (z = 0; z < user_params->HII_DIM; z++) {
// delta cannot be less than -1
*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(
*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. + FRACT_FLOAT_ERR);
if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) < min_density) {
min_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z));
}
if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) > max_density) {
max_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z));
}
}
}
}
}
if(user_params->USE_INTERPOLATION_TABLES) {
InterpolationRange(1,R,user_params->BOX_LEN,&min_density, &max_density);
}
if (flag_options->USE_MINI_HALOS){
// do the same for prev
prev_min_density = prev_max_density = 0.0;
#pragma omp parallel shared(prev_deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(max:prev_max_density) reduction(min:prev_min_density)
for (x=0; x<user_params->HII_DIM; x++){
for (y=0; y<user_params->HII_DIM; y++){
for (z=0; z<user_params->HII_DIM; z++){
// delta cannot be less than -1
*((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) = \
FMAX(*((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) , -1.+FRACT_FLOAT_ERR);
if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) < prev_min_density ) {
prev_min_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z));
}
if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) > prev_max_density ) {
prev_max_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z));
}
}
}
}
}
if(user_params->USE_INTERPOLATION_TABLES) {
InterpolationRange(2,R,user_params->BOX_LEN,&prev_min_density, &prev_max_density);
}
// do the same for logM
log10Mturn_min = 999;
log10Mturn_max = 0.0;
log10Mturn_min_MINI = 999;
log10Mturn_max_MINI = 0.0;
#pragma omp parallel shared(log10_Mturnover_filtered,log10_Mturnover_MINI_filtered,log10_Mcrit_atom,log10_Mcrit_mol) private(x, y, z) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(max:log10Mturn_max,log10Mturn_max_MINI) reduction(min:log10Mturn_min,log10Mturn_min_MINI)
for (x=0; x<user_params->HII_DIM; x++){
for (y=0; y<user_params->HII_DIM; y++){
for (z=0; z<user_params->HII_DIM; z++){
if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_atom)
*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_atom;
if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX)
*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX;
// Mturnover cannot be less than Mcrit_mol
if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_mol)
*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_mol;
if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX)
*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX;
if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min)
log10Mturn_min = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z));
if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max)
log10Mturn_max = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z));
if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min_MINI)
log10Mturn_min_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z));
if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max_MINI)
log10Mturn_max_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z));
}
}
}
}
if(user_params->USE_INTERPOLATION_TABLES) {
log10Mturn_min = log10Mturn_min *0.99;
log10Mturn_max = log10Mturn_max *1.01;
log10Mturn_min_MINI = log10Mturn_min_MINI *0.99;
log10Mturn_max_MINI = log10Mturn_max_MINI *1.01;
log10Mturn_bin_width = (log10Mturn_max - log10Mturn_min) / NMTURN;
log10Mturn_bin_width_inv = 1./log10Mturn_bin_width;
log10Mturn_bin_width_MINI = (log10Mturn_max_MINI - log10Mturn_min_MINI) / NMTURN;
log10Mturn_bin_width_inv_MINI = 1./log10Mturn_bin_width_MINI;
}
}
initialiseGL_Nion(NGL_SFR, M_MIN,massofscaleR);
if(user_params->USE_INTERPOLATION_TABLES) {
if(flag_options->USE_MINI_HALOS){
initialise_Nion_General_spline_MINI(redshift,Mcrit_atom,min_density,max_density,massofscaleR,M_MIN,
log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI,log10Mturn_max_MINI,
astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR10,
astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc,astro_params->F_STAR7_MINI,
astro_params->F_ESC7_MINI,Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES);
if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){
initialise_Nion_General_spline_MINI_prev(prev_redshift,Mcrit_atom,prev_min_density,prev_max_density,
massofscaleR,M_MIN,log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI,
log10Mturn_max_MINI,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,
astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc,
astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI,
Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES);
}
}
else{
initialise_Nion_General_spline(redshift,min_density,max_density,massofscaleR,astro_params->M_TURN,
astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR10,
astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES);
}
}
}
else {
erfc_denom = 2. * (pow(sigma_z0(M_MIN), 2) - pow(sigma_z0(massofscaleR), 2));
if (erfc_denom < 0) { // our filtering scale has become too small
break;
}
erfc_denom = sqrt(erfc_denom);
erfc_denom = 1. / (growth_factor * erfc_denom);
}
}
// Determine the global averaged f_coll for the overall normalisation
// Reset value of int check to see if we are over-stepping our interpolation table
for (i = 0; i < user_params->N_THREADS; i++) {
overdense_int_boundexceeded_threaded[i] = 0;
}
// renormalize the collapse fraction so that the mean matches ST,
// since we are using the evolved (non-linear) density field
#pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,overdense_int_boundexceeded_threaded,log10_Nion_spline,Nion_spline,erfc_denom,erfc_arg_min,\
erfc_arg_max,InvArgBinWidth,ArgBinWidth,ERFC_VALS_DIFF,ERFC_VALS,log10_Mturnover_filtered,log10Mturn_min,log10Mturn_bin_width_inv, \
log10_Mturnover_MINI_filtered,log10Mturn_bin_width_inv_MINI,log10_Nion_spline_MINI,prev_deltax_filtered,previous_ionize_box,ION_EFF_FACTOR,\
prev_overdense_small_min,prev_overdense_small_bin_width_inv,prev_log10_Nion_spline,prev_log10_Nion_spline_MINI,prev_overdense_large_min,\
prev_overdense_large_bin_width_inv,prev_Nion_spline,prev_Nion_spline_MINI,box,counter,M_coll_filtered,massofscaleR,pixel_volume,sigmaMmax,\
M_MIN,growth_factor,Mlim_Fstar,Mlim_Fesc,Mcrit_atom,Mlim_Fstar_MINI,Mlim_Fesc_MINI,prev_growth_factor) \
private(x,y,z,curr_dens,Splined_Fcoll,Splined_Fcoll_MINI,dens_val,overdense_int,erfc_arg_val,erfc_arg_val_index,log10_Mturnover,\
log10_Mturnover_int,log10_Mturnover_MINI,log10_Mturnover_MINI_int,prev_dens,prev_Splined_Fcoll,prev_Splined_Fcoll_MINI,\
prev_dens_val,density_over_mean,status_int) \
num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:f_coll,f_coll_MINI)
for (x = 0; x < user_params->HII_DIM; x++) {
for (y = 0; y < user_params->HII_DIM; y++) {
for (z = 0; z < user_params->HII_DIM; z++) {
// delta cannot be less than -1
*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(
*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. + FRACT_FLOAT_ERR);
// <N_rec> cannot be less than zero
if (flag_options->INHOMO_RECO) {
*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)), 0.0);
}
// x_e has to be between zero and unity
if (flag_options->USE_TS_FLUCT) {
*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.);
*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = FMIN(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.999);
}
if(flag_options->USE_HALO_FIELD) {
// collapsed mass cannot be less than zero
*((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) = FMAX(
*((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) , 0.0);
density_over_mean = 1.0 + *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z));
Splined_Fcoll = *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) / (massofscaleR*density_over_mean);
Splined_Fcoll *= (4/3.0)*PI*pow(R,3) / pixel_volume;
}
else {
curr_dens = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z));
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
if (flag_options->USE_MINI_HALOS){
log10_Mturnover = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z));
log10_Mturnover_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z));
if(user_params->USE_INTERPOLATION_TABLES) {
status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,log10_Mturnover,log10_Mturnover_MINI,
&Splined_Fcoll,&Splined_Fcoll_MINI);
if(status_int > 0) {
overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int;
}
}
else {
Splined_Fcoll = Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens,
pow(10.,log10_Mturnover),astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR10,
astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES);
Splined_Fcoll_MINI = Nion_ConditionalM_MINI(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens,
pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI,
Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES);
}
prev_dens = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z));
if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){
if(user_params->USE_INTERPOLATION_TABLES) {
status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,2,prev_dens,log10_Mturnover,log10_Mturnover_MINI,
&prev_Splined_Fcoll,&prev_Splined_Fcoll_MINI);
if(status_int > 0) {
overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int;
}
}
else {
prev_Splined_Fcoll = Nion_ConditionalM(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens,
pow(10.,log10_Mturnover),astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR10,
astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES);
prev_Splined_Fcoll_MINI = Nion_ConditionalM_MINI(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens,
pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI,
Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES);
}
}
else{
prev_Splined_Fcoll = 0.;
prev_Splined_Fcoll_MINI = 0.;
}
}
else{
if(user_params->USE_INTERPOLATION_TABLES) {
status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,0.,0.,&Splined_Fcoll,&Splined_Fcoll_MINI);
if(status_int > 0) {
overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int;
}
}
else {
Splined_Fcoll = Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens,
astro_params->M_TURN,astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC,astro_params->F_STAR10,
astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES);
}
}
}
else {
erfc_arg_val = (Deltac - curr_dens) * erfc_denom;
if (erfc_arg_val < erfc_arg_min || erfc_arg_val > erfc_arg_max) {
Splined_Fcoll = splined_erfc(erfc_arg_val);
} else {
erfc_arg_val_index = (int) floor((erfc_arg_val - erfc_arg_min) * InvArgBinWidth);
Splined_Fcoll = ERFC_VALS[erfc_arg_val_index] + \
(erfc_arg_val - (erfc_arg_min + ArgBinWidth * (double) erfc_arg_val_index)) * ERFC_VALS_DIFF[erfc_arg_val_index] *InvArgBinWidth;
}
}
}
                        // save the value of the collapsed fraction into the Fcoll array
if (flag_options->USE_MINI_HALOS){
if (Splined_Fcoll > 1.) Splined_Fcoll = 1.;
if (Splined_Fcoll < 0.) Splined_Fcoll = 1e-40;
if (prev_Splined_Fcoll > 1.) prev_Splined_Fcoll = 1.;
if (prev_Splined_Fcoll < 0.) prev_Splined_Fcoll = 1e-40;
box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \
previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll - prev_Splined_Fcoll;
if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.;
//if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40;
//if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)])
// box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
f_coll += box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
if(isfinite(f_coll)==0) {
LOG_ERROR("f_coll is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g,%g,%g",\
x,y,z,curr_dens,prev_dens,previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\
Splined_Fcoll, prev_Splined_Fcoll, curr_dens, prev_dens, \
log10_Mturnover, *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)));
Throw(ParameterError);
}
if (Splined_Fcoll_MINI > 1.) Splined_Fcoll_MINI = 1.;
if (Splined_Fcoll_MINI < 0.) Splined_Fcoll_MINI = 1e-40;
if (prev_Splined_Fcoll_MINI > 1.) prev_Splined_Fcoll_MINI = 1.;
if (prev_Splined_Fcoll_MINI < 0.) prev_Splined_Fcoll_MINI = 1e-40;
box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \
previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll_MINI - prev_Splined_Fcoll_MINI;
if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.;
//if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40;
//if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)])
// box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
f_coll_MINI += box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
if(isfinite(f_coll_MINI)==0) {
LOG_ERROR("f_coll_MINI is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g",\
x,y,z,curr_dens, prev_dens, previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\
Splined_Fcoll_MINI, prev_Splined_Fcoll_MINI, log10_Mturnover_MINI,\
*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)));
LOG_DEBUG("%g,%g",previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\
previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]);
LOG_DEBUG("%g,%g,%g,%g,%g,%g,%g,%g,",log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, \
log10Mturn_bin_width_inv, log10Mturn_max_MINI, log10Mturn_min_MINI, \
log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI);
LOG_DEBUG("%g,%g,%g,%g,%d",curr_dens, overdense_small_min, overdense_small_bin_width_inv, dens_val, overdense_int);
LOG_DEBUG("%d,%g,%g,%g",log10_Mturnover_MINI_int, log10_Mturnover_MINI, log10Mturn_min_MINI, log10Mturn_bin_width_inv_MINI);
LOG_DEBUG("%g", *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)));
LOG_DEBUG("%d", counter);
LOG_DEBUG("%g,%g,%g,%g",log10_Nion_spline_MINI[overdense_int + NSFR_low* log10_Mturnover_MINI_int ], \
log10_Nion_spline_MINI[overdense_int +1+ NSFR_low* log10_Mturnover_MINI_int ], \
log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)], \
log10_Nion_spline_MINI[overdense_int +1+ NSFR_low*(log10_Mturnover_MINI_int+1)]);
Throw(ParameterError);
}
}
else{
box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = Splined_Fcoll;
f_coll += Splined_Fcoll;
}
}
}
}
} // end loop through Fcoll box
for (i = 0; i < user_params->N_THREADS; i++) {
if (overdense_int_boundexceeded_threaded[i] == 1) {
LOG_ERROR("I have overstepped my allocated memory for one of the interpolation tables for the nion_splines");
Throw(ParameterError);
}
}
if(isfinite(f_coll)==0) {
LOG_ERROR("f_coll is either infinite or NaN!");
Throw(ParameterError);
}
f_coll /= (double) HII_TOT_NUM_PIXELS;
if(isfinite(f_coll_MINI)==0) {
LOG_ERROR("f_coll_MINI is either infinite or NaN!");
Throw(ParameterError);
}
f_coll_MINI /= (double) HII_TOT_NUM_PIXELS;
// To avoid ST_over_PS becoming nan when f_coll = 0, I set f_coll = FRACT_FLOAT_ERR.
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
if (f_coll <= f_coll_min) f_coll = f_coll_min;
if (flag_options->USE_MINI_HALOS){
if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI;
}
}
else {
if (f_coll <= FRACT_FLOAT_ERR) f_coll = FRACT_FLOAT_ERR;
}
ST_over_PS = box->mean_f_coll/f_coll;
ST_over_PS_MINI = box->mean_f_coll_MINI/f_coll_MINI;
////////////////////////////// MAIN LOOP THROUGH THE BOX ///////////////////////////////////
// now lets scroll through the filtered box
Gamma_R_prefactor = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR / 1.0e-12;
Gamma_R_prefactor_MINI = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR_MINI / 1.0e-12;
if(flag_options->PHOTON_CONS) {
// Used for recombinations, which means we want to use the original redshift not the adjusted redshift
Gamma_R_prefactor *= pow(1+stored_redshift, 2);
Gamma_R_prefactor_MINI *= pow(1+stored_redshift, 2);
}
else {
Gamma_R_prefactor *= pow(1+redshift, 2);
Gamma_R_prefactor_MINI *= pow(1+redshift, 2);
}
Gamma_R_prefactor /= t_ast;
Gamma_R_prefactor_MINI /= t_ast;
if (global_params.FIND_BUBBLE_ALGORITHM != 2 && global_params.FIND_BUBBLE_ALGORITHM != 1) { // center method
LOG_ERROR("Incorrect choice of find bubble algorithm: %i",
global_params.FIND_BUBBLE_ALGORITHM);
Throw(ValueError);
}
#pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,box,ST_over_PS,pixel_mass,M_MIN,r,f_coll_min,Gamma_R_prefactor,\
ION_EFF_FACTOR,ION_EFF_FACTOR_MINI,LAST_FILTER_STEP,counter,ST_over_PS_MINI,f_coll_min_MINI,Gamma_R_prefactor_MINI,TK) \
private(x,y,z,curr_dens,Splined_Fcoll,f_coll,ave_M_coll_cell,ave_N_min_cell,N_halos_in_cell,rec,xHII_from_xrays,res_xH,\
Splined_Fcoll_MINI,f_coll_MINI) \
num_threads(user_params->N_THREADS)
{
#pragma omp for
for (x = 0; x < user_params->HII_DIM; x++) {
for (y = 0; y < user_params->HII_DIM; y++) {
for (z = 0; z < user_params->HII_DIM; z++) {
curr_dens = *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z));
Splined_Fcoll = box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
f_coll = ST_over_PS * Splined_Fcoll;
if (flag_options->USE_MINI_HALOS){
Splined_Fcoll_MINI = box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)];
f_coll_MINI = ST_over_PS_MINI * Splined_Fcoll_MINI;
}
else{
f_coll_MINI = 0.;
}
if (LAST_FILTER_STEP){
ave_M_coll_cell = (f_coll + f_coll_MINI) * pixel_mass * (1. + curr_dens);
ave_N_min_cell = ave_M_coll_cell / M_MIN; // ave # of M_MIN halos in cell
if(user_params->NO_RNG) {
N_halos_in_cell = 1.;
}
else {
N_halos_in_cell = (int) gsl_ran_poisson(r[omp_get_thread_num()],
global_params.N_POISSON);
}
}
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
if (f_coll <= f_coll_min) f_coll = f_coll_min;
if (flag_options->USE_MINI_HALOS){
if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI;
}
}
if (flag_options->INHOMO_RECO) {
rec = (*((float *) N_rec_filtered +
HII_R_FFT_INDEX(x, y, z))); // number of recombinations per mean baryon
rec /= (1. + curr_dens); // number of recombinations per baryon inside <R>
} else {
rec = 0.;
}
// adjust the denominator of the collapse fraction for the residual electron fraction in the neutral medium
if (flag_options->USE_TS_FLUCT){
xHII_from_xrays = *((float *)xe_filtered + HII_R_FFT_INDEX(x,y,z));
} else {
xHII_from_xrays = 0.;
}
// check if fully ionized!
if ( (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI> (1. - xHII_from_xrays)*(1.0+rec)) ){ //IONIZED!!
// if this is the first crossing of the ionization barrier for this cell (largest R), record the gamma
// this assumes photon-starved growth of HII regions... breaks down post EoR
if (flag_options->INHOMO_RECO && (box->xH_box[HII_R_INDEX(x,y,z)] > FRACT_FLOAT_ERR) ){
box->Gamma12_box[HII_R_INDEX(x,y,z)] = Gamma_R_prefactor * f_coll + Gamma_R_prefactor_MINI * f_coll_MINI;
box->MFP_box[HII_R_INDEX(x,y,z)] = R;
}
// keep track of the first time this cell is ionized (earliest time)
if (previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)] < 0){
box->z_re_box[HII_R_INDEX(x,y,z)] = redshift;
} else{
box->z_re_box[HII_R_INDEX(x,y,z)] = previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)];
}
// FLAG CELL(S) AS IONIZED
if (global_params.FIND_BUBBLE_ALGORITHM == 2) // center method
box->xH_box[HII_R_INDEX(x,y,z)] = 0;
if (global_params.FIND_BUBBLE_ALGORITHM == 1) // sphere method
update_in_sphere(box->xH_box, user_params->HII_DIM, R/(user_params->BOX_LEN), \
x/(user_params->HII_DIM+0.0), y/(user_params->HII_DIM+0.0), z/(user_params->HII_DIM+0.0));
} // end ionized
// If not fully ionized, then assign partial ionizations
else if (LAST_FILTER_STEP && (box->xH_box[HII_R_INDEX(x, y, z)] > TINY)) {
if (f_coll>1) f_coll=1;
if (f_coll_MINI>1) f_coll_MINI=1;
if (!flag_options->USE_HALO_FIELD){
if(ave_N_min_cell < global_params.N_POISSON) {
f_coll = N_halos_in_cell * ( ave_M_coll_cell / (float)global_params.N_POISSON ) / (pixel_mass*(1. + curr_dens));
if (flag_options->USE_MINI_HALOS){
f_coll_MINI = f_coll * (f_coll_MINI * ION_EFF_FACTOR_MINI) / (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI);
f_coll = f_coll - f_coll_MINI;
}
else{
f_coll_MINI = 0.;
}
}
if (ave_M_coll_cell < (M_MIN / 5.)) {
f_coll = 0.;
f_coll_MINI = 0.;
}
}
if (f_coll>1) f_coll=1;
if (f_coll_MINI>1) f_coll_MINI=1;
res_xH = 1. - f_coll * ION_EFF_FACTOR - f_coll_MINI * ION_EFF_FACTOR_MINI;
// put the partial ionization here because we need to exclude xHII_from_xrays...
if (flag_options->USE_TS_FLUCT){
box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(spin_temp->Tk_box[HII_R_INDEX(x,y,z)], res_xH);
}
else{
box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(TK, res_xH);
}
res_xH -= xHII_from_xrays;
// and make sure fraction doesn't blow up for underdense pixels
if (res_xH < 0)
res_xH = 0;
else if (res_xH > 1)
res_xH = 1;
box->xH_box[HII_R_INDEX(x, y, z)] = res_xH;
} // end partial ionizations at last filtering step
} // k
} // j
} // i
}
if (first_step_R) {
R = stored_R;
first_step_R = 0;
} else {
R /= (global_params.DELTA_R_HII_FACTOR);
}
if (flag_options->USE_MINI_HALOS)
counter += 1;
}
#pragma omp parallel shared(box,spin_temp,redshift,deltax_unfiltered_original,TK) private(x,y,z) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (x=0; x<user_params->HII_DIM; x++){
for (y=0; y<user_params->HII_DIM; y++){
for (z=0; z<user_params->HII_DIM; z++){
if ((box->z_re_box[HII_R_INDEX(x,y,z)]>0) && (box->xH_box[HII_R_INDEX(x,y,z)] < TINY)){
box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputeFullyIoinizedTemperature(box->z_re_box[HII_R_INDEX(x,y,z)], \
redshift, *((float *)deltax_unfiltered_original + HII_R_FFT_INDEX(x,y,z)));
// Below sometimes (very rare though) can happen when the density drops too fast and to below T_HI
if (flag_options->USE_TS_FLUCT){
if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < spin_temp->Tk_box[HII_R_INDEX(x,y,z)])
box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = spin_temp->Tk_box[HII_R_INDEX(x,y,z)];
}
else{
if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < TK)
box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = TK;
}
}
}
}
}
}
for (x=0; x<user_params->HII_DIM; x++){
for (y=0; y<user_params->HII_DIM; y++){
for (z=0; z<user_params->HII_DIM; z++){
if(isfinite(box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)])==0){
LOG_ERROR("Tk after fully ioinzation is either infinite or a Nan. Something has gone wrong "\
"in the temperature calculation: z_re=%.4f, redshift=%.4f, curr_dens=%.4e", box->z_re_box[HII_R_INDEX(x,y,z)], redshift, curr_dens);
Throw(ParameterError);
}
}
}
}
// find the neutral fraction
if (LOG_LEVEL >= DEBUG_LEVEL) {
global_xH = 0;
#pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:global_xH)
for (ct = 0; ct < HII_TOT_NUM_PIXELS; ct++) {
global_xH += box->xH_box[ct];
}
}
global_xH /= (float) HII_TOT_NUM_PIXELS;
}
if (isfinite(global_xH) == 0) {
LOG_ERROR(
"Neutral fraction is either infinite or a Nan. Something has gone wrong in the ionisation calculation!");
Throw(ParameterError);
}
// update the N_rec field
if (flag_options->INHOMO_RECO) {
#pragma omp parallel shared(perturbed_field, adjustment_factor, stored_redshift, redshift, box, previous_ionize_box, \
fabs_dtdz, ZSTEP, something_finite_or_infinite) \
private(x, y, z, curr_dens, z_eff, dNrec) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (x = 0; x < user_params->HII_DIM; x++) {
for (y = 0; y < user_params->HII_DIM; y++) {
for (z = 0; z < user_params->HII_DIM; z++) {
// use the original density and redshift for the snapshot (not the adjusted redshift)
// Only want to use the adjusted redshift for the ionisation field
curr_dens = 1.0 + (perturbed_field->density[HII_R_INDEX(x, y, z)]) / adjustment_factor;
z_eff = pow(curr_dens, 1.0 / 3.0);
if (flag_options->PHOTON_CONS) {
z_eff *= (1 + stored_redshift);
} else {
z_eff *= (1 + redshift);
}
dNrec = splined_recombination_rate(z_eff - 1., box->Gamma12_box[HII_R_INDEX(x, y, z)]) *
fabs_dtdz * ZSTEP * (1. - box->xH_box[HII_R_INDEX(x, y, z)]);
if (isfinite(dNrec) == 0) {
something_finite_or_infinite = 1;
}
box->dNrec_box[HII_R_INDEX(x, y, z)] =
previous_ionize_box->dNrec_box[HII_R_INDEX(x, y, z)] + dNrec;
}
}
}
}
if (something_finite_or_infinite) {
LOG_ERROR("Recombinations have returned either an infinite or NaN value.");
Throw(ParameterError);
}
}
fftwf_cleanup_threads();
fftwf_cleanup();
fftwf_forget_wisdom();
}
destruct_heat();
for (i=0; i<user_params->N_THREADS; i++) {
gsl_rng_free (r[i]);
}
LOG_DEBUG("global_xH = %e",global_xH);
fftwf_free(deltax_unfiltered);
fftwf_free(deltax_unfiltered_original);
fftwf_free(deltax_filtered);
if(flag_options->USE_MINI_HALOS){
fftwf_free(prev_deltax_unfiltered);
fftwf_free(prev_deltax_filtered);
}
if(flag_options->USE_TS_FLUCT) {
fftwf_free(xe_unfiltered);
fftwf_free(xe_filtered);
}
if (flag_options->INHOMO_RECO){
fftwf_free(N_rec_unfiltered);
fftwf_free(N_rec_filtered);
}
if(flag_options->USE_HALO_FIELD) {
fftwf_free(M_coll_unfiltered);
fftwf_free(M_coll_filtered);
}
LOG_SUPER_DEBUG("freed fftw boxes");
if(flag_options->USE_MASS_DEPENDENT_ZETA) {
free(xi_SFR);
free(wi_SFR);
if(user_params->USE_INTERPOLATION_TABLES) {
free(log10_overdense_spline_SFR);
free(Overdense_spline_SFR);
free(log10_Nion_spline);
free(Nion_spline);
}
if(flag_options->USE_MINI_HALOS){
free(Mturns);
free(Mturns_MINI);
fftwf_free(log10_Mturnover_unfiltered);
fftwf_free(log10_Mturnover_filtered);
fftwf_free(log10_Mturnover_MINI_unfiltered);
fftwf_free(log10_Mturnover_MINI_filtered);
if(user_params->USE_INTERPOLATION_TABLES) {
free(prev_log10_overdense_spline_SFR);
free(prev_Overdense_spline_SFR);
free(prev_log10_Nion_spline);
free(prev_Nion_spline);
free(log10_Nion_spline_MINI);
free(Nion_spline_MINI);
free(prev_log10_Nion_spline_MINI);
free(prev_Nion_spline_MINI);
}
}
//fftwf_free(Mcrit_RE_grid);
//fftwf_free(Mcrit_LW_grid);
}
if (prev_redshift < 1){
free(previous_ionize_box->z_re_box);
if (flag_options->USE_MASS_DEPENDENT_ZETA && flag_options->USE_MINI_HALOS){
free(previous_ionize_box->Gamma12_box);
free(previous_ionize_box->dNrec_box);
free(previous_ionize_box->Fcoll);
free(previous_ionize_box->Fcoll_MINI);
}
}
if(!flag_options->USE_TS_FLUCT && user_params->USE_INTERPOLATION_TABLES) {
freeSigmaMInterpTable();
}
free(overdense_int_boundexceeded_threaded);
LOG_DEBUG("finished!\n");
} // End of Try()
Catch(status){
return(status);
}
return(0);
}
/**
 * Evaluates the pre-computed Nion / collapsed-fraction interpolation tables at a
 * single cell's overdensity, writing the splined value(s) through the output
 * pointers.
 *
 * @param MINI_HALOS          If true, also interpolate the mini-halo table in the
 *                            log10(Mturnover) direction (bilinear interpolation);
 *                            otherwise only the density direction is used (linear).
 * @param dens_type           1 selects the current-snapshot table binning,
 *                            2 selects the previous-snapshot ("prev_") binning.
 * @param curr_dens           Cell overdensity delta (so physical density is 1+delta).
 * @param filtered_Mturn      log10 of the filtered turnover mass (ACG table axis).
 * @param filtered_Mturn_MINI log10 of the filtered turnover mass (MCG table axis).
 * @param Splined_Fcoll       [out] interpolated collapsed fraction (atomic-cooling).
 * @param Splined_Fcoll_MINI  [out] interpolated collapsed fraction (molecular-cooling);
 *                            only written when MINI_HALOS is true.
 * @return 0 on success, 1 if the density bin index fell outside the table bounds
 *         (the interpolation is still performed with the out-of-range index).
 */
int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI) {

    int overdense_int,overdense_int_status;
    // NOTE(review): small_bin_width is copied below but never read in this
    // function (only small_bin_width_inv is used).
    float dens_val, small_bin_width, small_bin_width_inv, small_min;
    float log10_Mturnover, log10_Mturnover_MINI;
    int log10_Mturnover_int, log10_Mturnover_MINI_int;

    overdense_int_status = 0;

    // Locate the (fractional) bin coordinate along the log10(Mturnover) axes.
    if(MINI_HALOS) {
        log10_Mturnover = (filtered_Mturn - log10Mturn_min ) * log10Mturn_bin_width_inv;
        log10_Mturnover_int = (int)floorf( log10_Mturnover );
        log10_Mturnover_MINI = (filtered_Mturn_MINI - log10Mturn_min_MINI ) * log10Mturn_bin_width_inv_MINI;
        log10_Mturnover_MINI_int = (int)floorf( log10_Mturnover_MINI );
    }

    // Pick the low-density table binning for the requested snapshot.
    // NOTE(review): if dens_type is neither 1 nor 2, small_min and
    // small_bin_width_inv remain uninitialized and are read below —
    // callers are assumed to always pass 1 or 2; confirm at call sites.
    if(dens_type==1) {
        small_min = overdense_small_min;
        small_bin_width = overdense_small_bin_width;
        small_bin_width_inv = overdense_small_bin_width_inv;
    }
    if(dens_type==2) {
        small_min = prev_overdense_small_min;
        small_bin_width = prev_overdense_small_bin_width;
        small_bin_width_inv = prev_overdense_small_bin_width_inv;
    }

    if (curr_dens < global_params.CRIT_DENS_TRANSITION) {
        // Low-density regime: tables are binned in log10(1 + delta) and store
        // log10 values (hence the expf() at the end of this branch).
        if (curr_dens <= -1.) {
            // Empty cell: no collapse possible.
            *Splined_Fcoll = 0;
            if(MINI_HALOS) {
                *Splined_Fcoll_MINI = 0;
            }
        } else {
            dens_val = (log10f(curr_dens + 1.) - small_min) * small_bin_width_inv;

            overdense_int = (int) floorf(dens_val);
            if (overdense_int < 0 || (overdense_int + 1) > (NSFR_low - 1)) {
                // Flag, but do not abort: caller decides what to do with status 1.
                overdense_int_status = 1;
                LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). Current density=%g", omp_get_thread_num(), overdense_int, dens_val);
            }

            if(MINI_HALOS) {
                // Bilinear interpolation in (density bin, log10 Mturnover bin).
                if(dens_type==1) {
                    *Splined_Fcoll = ( \
                        log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        log10_Nion_spline_MINI[overdense_int + NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                if(dens_type==2) {
                    // Same interpolation against the previous-snapshot tables.
                    *Splined_Fcoll = ( \
                        prev_log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        prev_log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        prev_log10_Nion_spline_MINI[overdense_int + NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        prev_log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                // Low-density MINI table stores log values: convert back to linear.
                *Splined_Fcoll_MINI = expf(*Splined_Fcoll_MINI);
            }
            else {
                // No mini-halos: 1D linear interpolation in the density bin only.
                *Splined_Fcoll = log10_Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + log10_Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int);
            }
            // Low-density table stores log values: convert back to linear.
            *Splined_Fcoll = expf(*Splined_Fcoll);
        }
    }
    else {
        // High-density regime: tables are binned linearly in delta and store
        // linear values (no expf needed).
        if (curr_dens < 0.99 * Deltac) {
            if(dens_type==1) {
                dens_val = (curr_dens - overdense_large_min) * overdense_large_bin_width_inv;
            }
            if(dens_type==2) {
                dens_val = (curr_dens - prev_overdense_large_min) * prev_overdense_large_bin_width_inv;
            }

            overdense_int = (int) floorf(dens_val);
            if (overdense_int < 0 || (overdense_int + 1) > (NSFR_high - 1)) {
                overdense_int_status = 1;
                LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). Current density=%g", omp_get_thread_num(), overdense_int, dens_val);
            }

            if(MINI_HALOS) {
                // Bilinear interpolation in (density bin, log10 Mturnover bin).
                if(dens_type==1) {
                    *Splined_Fcoll = ( \
                        Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                if(dens_type==2) {
                    // Same interpolation against the previous-snapshot tables.
                    *Splined_Fcoll = ( \
                        prev_Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        prev_Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        prev_Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        prev_Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
            }
            else {
                // No mini-halos: 1D linear interpolation in the density bin only.
                *Splined_Fcoll = Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int);
            }
        }
        else {
            // At/above collapse threshold: the cell is fully collapsed.
            *Splined_Fcoll = 1.;
            if(MINI_HALOS) {
                *Splined_Fcoll_MINI = 1.;
            }
        }
    }
    return overdense_int_status;
}
void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density) {
float small_bin_width, small_bin_width_inv, small_min;
if (*min_density < 0.) {
*min_density = *min_density * 1.001;
if (*min_density <= -1.) {
// Use MIN_DENSITY_LOW_LIMIT as is it smaller than FRACT_FLOAT_ERR
*min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
}
} else {
*min_density = *min_density * 0.999;
}
if (*max_density < 0.) {
*max_density = *max_density * 0.999;
} else {
*max_density = *max_density * 1.001;
}
if (global_params.HII_FILTER == 1) {
if ((0.413566994 * R * 2. * PI / L) > 1.) {
// The sharp k-space filter will set every cell to zero, and the interpolation table using a flexible min/max density will fail.
*min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
*max_density = global_params.CRIT_DENS_TRANSITION * 1.001;
}
}
small_min = log10(1. + *min_density);
if (*max_density > global_params.CRIT_DENS_TRANSITION * 1.001) {
small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + global_params.CRIT_DENS_TRANSITION * 1.001) - small_min);
} else {
small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + *max_density) - small_min);
}
small_bin_width_inv = 1./small_bin_width;
if(dens_type==1) {
overdense_small_min = small_min;
overdense_small_bin_width = small_bin_width;
overdense_small_bin_width_inv = small_bin_width_inv;
LOG_ULTRA_DEBUG("R=%f, min_density=%f, max_density=%f, overdense_small_min=%f, overdense_small_bin_width=%f",\
R, *min_density, *max_density, small_min, small_bin_width);
}
if(dens_type==2) {
prev_overdense_small_min = small_min;
prev_overdense_small_bin_width = small_bin_width;
prev_overdense_small_bin_width_inv = small_bin_width_inv;
LOG_ULTRA_DEBUG("R=%f, prev_min_density=%f, prev_max_density=%f, prev_overdense_small_min=%f, prev_overdense_small_bin_width=%f",\
R, *min_density, *max_density, small_min, small_bin_width);
}
}
|
MeshRefiner.h | /**
* @file
* This file is part of SeisSol.
*
* @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger)
*
* @section LICENSE
* Copyright (c) 2015, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
*/
#ifndef MESH_REFINER_H_
#define MESH_REFINER_H_
#include <cstring>
#include "Geometry/MeshReader.h"
#include "RefinerUtils.h"
namespace seissol
{
namespace refinement
{
//------------------------------------------------------------------------------
/**
 * Refines each tetrahedron of an input mesh into a fixed number of
 * sub-tetrahedra and stores the resulting flat cell/vertex arrays.
 *
 * The class owns the raw m_cells / m_vertices buffers (freed in the
 * destructor), so copying is disabled to prevent double deletion
 * (rule of five).
 */
template<typename T>
class MeshRefiner
{
private:
    // m_cells contains the indices of the cells (kIndicesPerCell per sub-cell)
    unsigned int* m_cells;
    // Vertex coordinates, 3 components per vertex
    T* m_vertices;

    size_t m_numSubCells;
    size_t m_numVertices;

    static const unsigned int kIndicesPerCell = 4;
    const unsigned int kSubCellsPerCell;

public:
    MeshRefiner(const MeshReader& meshReader,
            const TetrahedronRefiner<T>& tetRefiner);

    MeshRefiner(const std::vector<const Element *>& subElements,
            const std::vector<const Vertex *>& subVertices,
            const std::map<int, int>& oldToNewVertexMap,
            const TetrahedronRefiner<T>& tetRefiner);

    ~MeshRefiner();

    // Non-copyable: the destructor frees raw arrays, so an implicit copy
    // would lead to a double delete.
    MeshRefiner(const MeshRefiner&) = delete;
    MeshRefiner& operator=(const MeshRefiner&) = delete;

    const unsigned int* getCellData() const;
    const T* getVertexData() const;
    std::size_t getNumCells() const;
    std::size_t getNumVertices() const;
};
//------------------------------------------------------------------------------
/**
 * Refines every cell of the given mesh.
 *
 * @param meshReader Provides the input vertices and tetrahedral elements.
 * @param tetRefiner Strategy that subdivides one tetrahedron into
 *                   getDivisionCount() sub-tetrahedra.
 */
template<typename T>
MeshRefiner<T>::MeshRefiner(
        const MeshReader& meshReader,
        const TetrahedronRefiner<T>& tetRefiner)
        : kSubCellsPerCell(tetRefiner.getDivisionCount())
{
    using std::size_t;

    const size_t kInVertexCount = meshReader.getVertices().size();
    const size_t kInCellCount = meshReader.getElements().size();

    m_numSubCells = kInCellCount * kSubCellsPerCell;

    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
    m_vertices = new T[m_numVertices * 3];

    const std::vector<Vertex>& kVertices = meshReader.getVertices();
    const std::vector<Element>& kElements = meshReader.getElements();

    // Copy the original vertices component-wise with a cast.
    // BUGFIX: the previous memcpy copied sizeof(double)*3 bytes into a T[3]
    // slot, which reinterprets the bytes and overruns the slot whenever
    // T is float; element-wise assignment is correct for any T.
#ifdef _OPENMP
    #pragma omp parallel for
#endif // _OPENMP
    for (unsigned int i = 0; i < kInVertexCount; i++) {
        for (unsigned int d = 0; d < 3; d++) {
            m_vertices[i*3 + d] = static_cast<T>(kVertices[i].coords[d]);
        }
    }

    // The pointer to the new vertices
    T* newVertices = &m_vertices[kInVertexCount*3];

    // Start the actual cell-refinement
#ifdef _OPENMP
    #pragma omp parallel
    {
#endif // _OPENMP

    // Per-thread scratch buffers for the refiner output
    glm::tvec3<T>* newVerticesTmp = new glm::tvec3<T>[additionalVertices];
    Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell];

#ifdef _OPENMP
    #pragma omp for schedule(static) nowait
#endif // _OPENMP
    for (size_t c = 0; c < kInCellCount; ++c)
    {
        // Build a Tetrahedron containing the coordinates of the vertices.
        Tetrahedron<T> inTet = Tetrahedron<T>(
                kVertices[kElements[c].vertices[0]].coords,
                kVertices[kElements[c].vertices[1]].coords,
                kVertices[kElements[c].vertices[2]].coords,
                kVertices[kElements[c].vertices[3]].coords,
                kElements[c].vertices[0],
                kElements[c].vertices[1],
                kElements[c].vertices[2],
                kElements[c].vertices[3]);

        // Generate the sub-tetrahedra
        tetRefiner.refine(inTet,
                kInVertexCount + c*additionalVertices,
                newTetsTmp, newVerticesTmp);

        // Copy new vertices (both sides are T here, so memcpy is safe)
        for (unsigned int i = 0; i < additionalVertices; i++) {
            memcpy(&newVertices[(c*additionalVertices + i) * 3],
                    glm::value_ptr(newVerticesTmp[i]), sizeof(T)*3);
        }

        // Copy tets
        for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
            m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
        }
    }

    delete [] newVerticesTmp;
    delete [] newTetsTmp;

#ifdef _OPENMP
    }
#endif
}
/**
 * Refines a subset of a mesh given as pointer lists, remapping the global
 * vertex indices of each element through oldToNewVertexMap.
 *
 * @param subElements       Elements (by pointer) to refine.
 * @param subVertices       Vertices (by pointer) referenced by subElements.
 * @param oldToNewVertexMap Maps original vertex indices to indices into
 *                          subVertices.
 * @param tetRefiner        Strategy that subdivides one tetrahedron into
 *                          getDivisionCount() sub-tetrahedra.
 */
template<typename T>
MeshRefiner<T>::MeshRefiner(
        const std::vector<const Element *>& subElements,
        const std::vector<const Vertex *>& subVertices,
        const std::map<int, int>& oldToNewVertexMap,
        const TetrahedronRefiner<T>& tetRefiner)
        : kSubCellsPerCell(tetRefiner.getDivisionCount())
{
    using std::size_t;

    const size_t kInVertexCount = subVertices.size();
    const size_t kInCellCount = subElements.size();

    m_numSubCells = kInCellCount * kSubCellsPerCell;

    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
    m_vertices = new T[m_numVertices * 3];

    const std::vector<const Vertex*>& kVertices = subVertices;
    const std::vector<const Element*>& kElements = subElements;

    // Copy the original vertices component-wise with a cast.
    // BUGFIX: the previous memcpy copied sizeof(double)*3 bytes into a T[3]
    // slot, which reinterprets the bytes and overruns the slot whenever
    // T is float; element-wise assignment is correct for any T.
#ifdef _OPENMP
    #pragma omp parallel for
#endif // _OPENMP
    for (unsigned int i = 0; i < kInVertexCount; i++) {
        for (unsigned int d = 0; d < 3; d++) {
            m_vertices[i*3 + d] = static_cast<T>(kVertices[i]->coords[d]);
        }
    }

    // The pointer to the new vertices
    T* newVertices = &m_vertices[kInVertexCount*3];

    // Start the actual cell-refinement
#ifdef _OPENMP
    #pragma omp parallel shared(oldToNewVertexMap)
    {
#endif // _OPENMP

    // Per-thread scratch buffers for the refiner output
    glm::tvec3<T>* newVerticesTmp = new glm::tvec3<T>[additionalVertices];
    Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell];

#ifdef _OPENMP
    #pragma omp for schedule(static) nowait
#endif // _OPENMP
    for (size_t c = 0; c < kInCellCount; ++c)
    {
        // Build a Tetrahedron containing the coordinates of the vertices,
        // translating each global vertex index into the local numbering.
        Tetrahedron<T> inTet = Tetrahedron<T>(
                kVertices[oldToNewVertexMap.at(kElements[c]->vertices[0])]->coords,
                kVertices[oldToNewVertexMap.at(kElements[c]->vertices[1])]->coords,
                kVertices[oldToNewVertexMap.at(kElements[c]->vertices[2])]->coords,
                kVertices[oldToNewVertexMap.at(kElements[c]->vertices[3])]->coords,
                oldToNewVertexMap.at(kElements[c]->vertices[0]),
                oldToNewVertexMap.at(kElements[c]->vertices[1]),
                oldToNewVertexMap.at(kElements[c]->vertices[2]),
                oldToNewVertexMap.at(kElements[c]->vertices[3]));

        // Generate the sub-tetrahedra
        tetRefiner.refine(inTet,
                kInVertexCount + c*additionalVertices,
                newTetsTmp, newVerticesTmp);

        // Copy new vertices (both sides are T here, so memcpy is safe)
        for (unsigned int i = 0; i < additionalVertices; i++) {
            memcpy(&newVertices[(c*additionalVertices + i) * 3],
                    glm::value_ptr(newVerticesTmp[i]), sizeof(T)*3);
        }

        // Copy tets
        for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
            m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
        }
    }

    delete [] newVerticesTmp;
    delete [] newTetsTmp;

#ifdef _OPENMP
    }
#endif
}
template<typename T>
MeshRefiner<T>::~MeshRefiner()
{
    // Release the cell and vertex buffers allocated in the constructors.
    delete [] m_cells;
    delete [] m_vertices;
}
//------------------------------------------------------------------------------
// Returns the refined cell connectivity array (kIndicesPerCell indices per sub-cell).
template<typename T>
const unsigned int* MeshRefiner<T>::getCellData() const {
    return m_cells;
}
//------------------------------------------------------------------------------
// Returns the vertex coordinate array (3 components of type T per vertex).
template<typename T>
const T* MeshRefiner<T>::getVertexData() const {
    return m_vertices;
}
//------------------------------------------------------------------------------
template<typename T>
std::size_t MeshRefiner<T>::getNumCells() const {
    // Total sub-cell count: input cells times subdivisions per cell.
    return m_numSubCells;
}
//------------------------------------------------------------------------------
template<typename T>
std::size_t MeshRefiner<T>::getNumVertices() const {
    // Original vertex count plus the refiner's additional vertices per cell.
    return m_numVertices;
}
//------------------------------------------------------------------------------
} // namespace
}
#endif // MESH_REFINER_H_
|
omp_workshare1.c | /******************************************************************************
* FILE: omp_workshare1.c
* DESCRIPTION:
* OpenMP Example - Loop Work-sharing - C/C++ Version
* In this example, the iterations of a loop are scheduled dynamically
* across the team of threads. A thread will perform CHUNK iterations
* at a time before being scheduled for the next CHUNK of work.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100
/**
 * Demonstrates OpenMP loop work-sharing with dynamic scheduling: iterations
 * are handed out to threads in CHUNKSIZE-sized batches on demand.
 * Returns 0 on success (previously relied on an implicit return, which is
 * undefined behavior before C99).
 */
int main (int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i=0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
    {
        tid = omp_get_thread_num();
        /* Only one thread reports the team size. */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n",tid);

        /* Each thread grabs `chunk` iterations at a time. */
        #pragma omp for schedule(dynamic,chunk)
        for (i=0; i<N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
        }
    }  /* end of parallel section */

    return 0;
}
|
scheduleg-clause.c | /*
* scheduleg-clause.c
*
* Created on: 28/04/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/**
 * Demonstrates schedule(guided) with firstprivate/lastprivate.
 * Usage: prog [num iteraciones] [num chunk] [num hebras] [modo salida]
 * Returns 0 on success; exits with status -1 on missing arguments.
 */
int main(int argc, char **argv) {
    /* a[n] is a VLA sized with the initial n == 20; n is later clamped to 20,
     * so the array is always large enough. */
    int i, n = 20, chunk, hebras = 1, a[n], suma = 0;

    /* Validate arguments via argc instead of probing argv[i] for NULL.
     * (The original's usage-string branch was unreachable: argc < 4 always
     * implies one of argv[1..3] is NULL.) */
    if (argc < 2) {
        fprintf(stderr, "[ERROR]-Falta iteraciones\n");
        exit(-1);
    }
    if (argc < 3) {
        fprintf(stderr, "[ERROR]-Falta chunk\n");
        exit(-1);
    }
    if (argc < 4) {
        fprintf(stderr, "[ERROR]-Falta numero de hebras\n");
        exit(-1);
    }

    n = atoi(argv[1]);
    if (n > 20)
        n = 20;     /* never exceed the allocated array size */
    chunk = atoi(argv[2]);
    hebras = atoi(argv[3]);

    for (i = 0; i < n; i++)
        a[i] = i;

    /* firstprivate: each thread's private suma starts at 0.
     * lastprivate: after the loop, suma holds the value from the thread that
     * ran the sequentially-last iteration — NOT the global sum. This is the
     * pedagogical point of the example. */
    #pragma omp parallel for num_threads(hebras) firstprivate(suma) lastprivate(suma) schedule(guided,chunk)
    for (i = 0; i < n; i++) {
        suma = suma + a[i];
        if (argc < 5)   /* equivalent to the original argv[4]==NULL check */
            printf(" La iteración %d la realiza thread %d suma a[%d]=%d suma=%d \n", i, omp_get_thread_num(), i, a[i], suma);
        else if (atoi(argv[4])==1)
            printf("%d %d\n", i, omp_get_thread_num());
    }
    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
|
x_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "work_lhs.h"
#include "timers.h"
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve()
{
// printf("xxxxxx\n");
int i, j, k, m, n, isize;
//kai
// int k12;
// consistent_data(&k12, "int", 1);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
if (timeron) timer_start(t_xsolve);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0]-1;
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n)
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 0; i <= isize; i++) {
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjac[i][0][0] = 0.0;
fjac[i][1][0] = 1.0;
fjac[i][2][0] = 0.0;
fjac[i][3][0] = 0.0;
fjac[i][4][0] = 0.0;
fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])
+ c2 * qs[k][j][i];
fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );
fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );
fjac[i][4][1] = c2;
fjac[i][0][2] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;
fjac[i][1][2] = u[k][j][i][2] * tmp1;
fjac[i][2][2] = u[k][j][i][1] * tmp1;
fjac[i][3][2] = 0.0;
fjac[i][4][2] = 0.0;
fjac[i][0][3] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
fjac[i][1][3] = u[k][j][i][3] * tmp1;
fjac[i][2][3] = 0.0;
fjac[i][3][3] = u[k][j][i][1] * tmp1;
fjac[i][4][3] = 0.0;
fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* ( u[k][j][i][1] * tmp2 );
fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1
- c2 * ( u[k][j][i][1]*u[k][j][i][1] * tmp2 + qs[k][j][i] );
fjac[i][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * tmp2;
fjac[i][3][4] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * tmp2;
fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );
njac[i][0][0] = 0.0;
njac[i][1][0] = 0.0;
njac[i][2][0] = 0.0;
njac[i][3][0] = 0.0;
njac[i][4][0] = 0.0;
njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];
njac[i][1][1] = con43 * c3c4 * tmp1;
njac[i][2][1] = 0.0;
njac[i][3][1] = 0.0;
njac[i][4][1] = 0.0;
njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[i][1][2] = 0.0;
njac[i][2][2] = c3c4 * tmp1;
njac[i][3][2] = 0.0;
njac[i][4][2] = 0.0;
njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
njac[i][1][3] = 0.0;
njac[i][2][3] = 0.0;
njac[i][3][3] = c3c4 * tmp1;
njac[i][4][3] = 0.0;
njac[i][0][4] = - ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[i][1][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][1];
njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
njac[i][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
lhsinit(lhs, isize);
for (i = 1; i <= isize-1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][AA][0][0] = - tmp2 * fjac[i-1][0][0]
- tmp1 * njac[i-1][0][0]
- tmp1 * dx1;
lhs[i][AA][1][0] = - tmp2 * fjac[i-1][1][0]
- tmp1 * njac[i-1][1][0];
lhs[i][AA][2][0] = - tmp2 * fjac[i-1][2][0]
- tmp1 * njac[i-1][2][0];
lhs[i][AA][3][0] = - tmp2 * fjac[i-1][3][0]
- tmp1 * njac[i-1][3][0];
lhs[i][AA][4][0] = - tmp2 * fjac[i-1][4][0]
- tmp1 * njac[i-1][4][0];
lhs[i][AA][0][1] = - tmp2 * fjac[i-1][0][1]
- tmp1 * njac[i-1][0][1];
lhs[i][AA][1][1] = - tmp2 * fjac[i-1][1][1]
- tmp1 * njac[i-1][1][1]
- tmp1 * dx2;
lhs[i][AA][2][1] = - tmp2 * fjac[i-1][2][1]
- tmp1 * njac[i-1][2][1];
lhs[i][AA][3][1] = - tmp2 * fjac[i-1][3][1]
- tmp1 * njac[i-1][3][1];
lhs[i][AA][4][1] = - tmp2 * fjac[i-1][4][1]
- tmp1 * njac[i-1][4][1];
lhs[i][AA][0][2] = - tmp2 * fjac[i-1][0][2]
- tmp1 * njac[i-1][0][2];
lhs[i][AA][1][2] = - tmp2 * fjac[i-1][1][2]
- tmp1 * njac[i-1][1][2];
lhs[i][AA][2][2] = - tmp2 * fjac[i-1][2][2]
- tmp1 * njac[i-1][2][2]
- tmp1 * dx3;
lhs[i][AA][3][2] = - tmp2 * fjac[i-1][3][2]
- tmp1 * njac[i-1][3][2];
lhs[i][AA][4][2] = - tmp2 * fjac[i-1][4][2]
- tmp1 * njac[i-1][4][2];
lhs[i][AA][0][3] = - tmp2 * fjac[i-1][0][3]
- tmp1 * njac[i-1][0][3];
lhs[i][AA][1][3] = - tmp2 * fjac[i-1][1][3]
- tmp1 * njac[i-1][1][3];
lhs[i][AA][2][3] = - tmp2 * fjac[i-1][2][3]
- tmp1 * njac[i-1][2][3];
lhs[i][AA][3][3] = - tmp2 * fjac[i-1][3][3]
- tmp1 * njac[i-1][3][3]
- tmp1 * dx4;
lhs[i][AA][4][3] = - tmp2 * fjac[i-1][4][3]
- tmp1 * njac[i-1][4][3];
lhs[i][AA][0][4] = - tmp2 * fjac[i-1][0][4]
- tmp1 * njac[i-1][0][4];
lhs[i][AA][1][4] = - tmp2 * fjac[i-1][1][4]
- tmp1 * njac[i-1][1][4];
lhs[i][AA][2][4] = - tmp2 * fjac[i-1][2][4]
- tmp1 * njac[i-1][2][4];
lhs[i][AA][3][4] = - tmp2 * fjac[i-1][3][4]
- tmp1 * njac[i-1][3][4];
lhs[i][AA][4][4] = - tmp2 * fjac[i-1][4][4]
- tmp1 * njac[i-1][4][4]
- tmp1 * dx5;
lhs[i][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0];
lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0];
lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0];
lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0];
lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1];
lhs[i][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1];
lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1];
lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1];
lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2];
lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2];
lhs[i][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2];
lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2];
lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3];
lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3];
lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3];
lhs[i][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3];
lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4];
lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4];
lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4];
lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4];
lhs[i][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][CC][0][0] = tmp2 * fjac[i+1][0][0]
- tmp1 * njac[i+1][0][0]
- tmp1 * dx1;
lhs[i][CC][1][0] = tmp2 * fjac[i+1][1][0]
- tmp1 * njac[i+1][1][0];
lhs[i][CC][2][0] = tmp2 * fjac[i+1][2][0]
- tmp1 * njac[i+1][2][0];
lhs[i][CC][3][0] = tmp2 * fjac[i+1][3][0]
- tmp1 * njac[i+1][3][0];
lhs[i][CC][4][0] = tmp2 * fjac[i+1][4][0]
- tmp1 * njac[i+1][4][0];
lhs[i][CC][0][1] = tmp2 * fjac[i+1][0][1]
- tmp1 * njac[i+1][0][1];
lhs[i][CC][1][1] = tmp2 * fjac[i+1][1][1]
- tmp1 * njac[i+1][1][1]
- tmp1 * dx2;
lhs[i][CC][2][1] = tmp2 * fjac[i+1][2][1]
- tmp1 * njac[i+1][2][1];
lhs[i][CC][3][1] = tmp2 * fjac[i+1][3][1]
- tmp1 * njac[i+1][3][1];
lhs[i][CC][4][1] = tmp2 * fjac[i+1][4][1]
- tmp1 * njac[i+1][4][1];
lhs[i][CC][0][2] = tmp2 * fjac[i+1][0][2]
- tmp1 * njac[i+1][0][2];
lhs[i][CC][1][2] = tmp2 * fjac[i+1][1][2]
- tmp1 * njac[i+1][1][2];
lhs[i][CC][2][2] = tmp2 * fjac[i+1][2][2]
- tmp1 * njac[i+1][2][2]
- tmp1 * dx3;
lhs[i][CC][3][2] = tmp2 * fjac[i+1][3][2]
- tmp1 * njac[i+1][3][2];
lhs[i][CC][4][2] = tmp2 * fjac[i+1][4][2]
- tmp1 * njac[i+1][4][2];
lhs[i][CC][0][3] = tmp2 * fjac[i+1][0][3]
- tmp1 * njac[i+1][0][3];
lhs[i][CC][1][3] = tmp2 * fjac[i+1][1][3]
- tmp1 * njac[i+1][1][3];
lhs[i][CC][2][3] = tmp2 * fjac[i+1][2][3]
- tmp1 * njac[i+1][2][3];
lhs[i][CC][3][3] = tmp2 * fjac[i+1][3][3]
- tmp1 * njac[i+1][3][3]
- tmp1 * dx4;
lhs[i][CC][4][3] = tmp2 * fjac[i+1][4][3]
- tmp1 * njac[i+1][4][3];
lhs[i][CC][0][4] = tmp2 * fjac[i+1][0][4]
- tmp1 * njac[i+1][0][4];
lhs[i][CC][1][4] = tmp2 * fjac[i+1][1][4]
- tmp1 * njac[i+1][1][4];
lhs[i][CC][2][4] = tmp2 * fjac[i+1][2][4]
- tmp1 * njac[i+1][2][4];
lhs[i][CC][3][4] = tmp2 * fjac[i+1][3][4]
- tmp1 * njac[i+1][3][4];
lhs[i][CC][4][4] = tmp2 * fjac[i+1][4][4]
- tmp1 * njac[i+1][4][4]
- tmp1 * dx5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
  // performs gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (i = 1; i <= isize-1; i++) {
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
matvec_sub(lhs[i][AA], rhs[k][j][i-1], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
matmul_sub(lhs[i][AA], lhs[i-1][CC], lhs[i][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[isize][AA], rhs[k][j][isize-1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]);
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[isize][BB], rhs[k][j][isize] );
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
for (i = isize-1; i >=0; i--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[i][CC][n][m]*rhs[k][j][i+1][n];
}
}
}
}
//kai
k12 = k;
// printf("k12=%p\n", &k12);
}
if (timeron) timer_stop(t_xsolve);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *RESULT = *X - *Y for two `struct timeval' values.
 *
 * NOTE: as in the classic glibc example this routine is based on, *Y is
 * normalized in place as a side effect; callers must not rely on *Y
 * afterwards.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from y->tv_sec so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }

  /* Move any whole seconds out of the microsecond difference. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec of the difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(16*t3+Nx+12,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),32*t4+30),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
mat_mul_p4a_5000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
/*
 * Par4All-generated matrix "multiply" over 5000x5000 int matrices stored
 * row-major in flat arrays.  For each (i, j) it computes
 *
 *     c[i*5000+j] = sum over k of 100 repeated additions of
 *                   a[i*5000+k] * b[j*5000+k]
 *
 * NOTE(review): two things look intentional-but-unusual in this generated
 * kernel and should be confirmed against the original source:
 *   - `b` is indexed as b[j*5000+k], i.e. the product is A * B-transposed,
 *     not A * B;
 *   - the innermost t-loop adds the SAME product 100 times, so every
 *     entry is 100x the dot product (likely a workload amplifier).
 * The outer i-loop is parallelized; j, k, t are per-thread private.
 */
void mat_mul(int *a, int *b, int *c)
{
   int i, j, k, t;
#pragma omp parallel for private(j, t, k)
   for(i = 0; i <= 4999; i += 1)
      for(j = 0; j <= 4999; j += 1) {
         c[i*5000+j] = 0;  /* clear accumulator before the k/t loops */
         for(k = 0; k <= 4999; k += 1)
            for(t = 0; t <= 99; t += 1)
               c[i*5000+j] += a[i*5000+k]*b[j*5000+k];
      }
   return;
}
|
GB_unop__identity_fc64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_bool
// op(A') function: GB_unop_tran__identity_fc64_bool
// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (GxB_FC64_t) Ax [p]: typecast each bool entry to a double
// complex value GxB_CMPLX((double) aij, 0) and store it in Cx.
// Auto-generated template instantiation; the code must stay in sync with
// the generator, so only comments are added here.
GrB_Info GB_unop_apply__identity_fc64_bool
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads
)
{
    #if GB_DISABLE
    // this kernel was compiled out via GB_DISABLE; caller falls back to
    // the generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a straight memcpy suffices
        // (not taken here: bool -> FC64 requires a cast)
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (GxB_FC64_t) A': transpose A while typecasting bool -> double
// complex.  All real work is done by the shared GB_unop_transpose.c
// template, driven by the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__identity_fc64_bool
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *GB_RESTRICT *Workspaces,   // per-task workspaces (see template)
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // compiled out; caller uses the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive convolution for MIPS MSA where the input feature maps are packed
// 4 channels per element (pack4) and the output is unpacked (pack1).
// top_blob must already be allocated to the output shape; the weight blob
// is walked linearly as outch x (channels * maxk * 4), matching the kptr
// increments below.  FP accumulation order is fixed by the intrinsics and
// deliberately left untouched.
static void convolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int maxk = kernel_w * kernel_h;
    // kernel offsets: element offset of each kernel tap relative to the
    // window origin, in the (dilated) input layout
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // jump from the end of one kernel row to the start of the next
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    // NULL when the layer has no bias (Mat -> const float* conversion)
    const float* bias_data_ptr = bias_data;
    // num_output: one independent output channel per iteration
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }
                // 4-lane accumulator, one lane per packed input channel
                v4f32 _sum = (v4f32)__msa_fill_w(0);
                const float* kptr = (const float*)weight_data_pack4to1.channel(p);
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // *4: each spatial position holds 4 packed floats
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
                    for (int k = 0; k < maxk; k++)
                    {
                        v4f32 _val = (v4f32)__msa_ld_w(sptr + space_ofs[k] * 4, 0);
                        v4f32 _w = (v4f32)__msa_ld_w(kptr, 0);
                        _sum = __msa_fmadd_w(_sum, _val, _w);
                        kptr += 4;
                    }
                }
                // reduce the 4 lane partial sums into the scalar result
                sum += __msa_fhadd_w(_sum);
                sum = activation_ss(sum, activation_type, activation_params);
                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
|
GB_binop__plus_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_int64
// A.*B function (eWiseMult): GB_AemultB__plus_int64
// A*D function (colscale): GB_AxD__plus_int64
// D*A function (rowscale): GB_DxB__plus_int64
// C+=B function (dense accum): GB_Cdense_accumB__plus_int64
// C+=b function (dense accum): GB_Cdense_accumb__plus_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_int64
// C=scalar+B GB_bind1st__plus_int64
// C=scalar+B' GB_bind1st_tran__plus_int64
// C=A+scalar GB_bind2nd__plus_int64
// C=A'+scalar GB_bind2nd_tran__plus_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT64 || GxB_NO_PLUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense int64 matrices; the PLUS op is
// applied element-wise by the shared template.  Note: unlike the other
// kernels in this file there is no GB_DISABLE guard, matching the
// generator template for this variant.
void GB_Cdense_ewise3_accum__plus_int64
(
    GrB_Matrix C,               // input/output, modified in place
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense int64; cij = aij + bij.
GrB_Info GB_Cdense_ewise3_noaccum__plus_int64
(
    GrB_Matrix C,               // output, already allocated and dense
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads
)
{
    #if GB_DISABLE
    // compiled out via GB_DISABLE; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse int64 matrix B into the dense matrix C,
// with work pre-partitioned into ntasks slices (see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__plus_int64
(
    GrB_Matrix C,                               // dense input/output
    const GrB_Matrix B,                         // sparse matrix to accumulate
    const int64_t *GB_RESTRICT kfirst_slice,    // per-task slice bounds
    const int64_t *GB_RESTRICT klast_slice,     //   (produced by GB_ek_slice)
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense
// matrix C.
GrB_Info GB_Cdense_accumb__plus_int64
(
    GrB_Matrix C,               // dense input/output
    const GB_void *p_bwork,     // pointer to the scalar b (as int64_t)
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; retained to match the generator template
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// PLUS op as the "multiplier" (cij = aij + djj); int64 arithmetic.
GrB_Info GB_AxD__plus_int64
(
    GrB_Matrix C,                               // output
    const GrB_Matrix A, bool A_is_pattern,      // pattern: values ignored
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,    // per-task slice bounds
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes results directly into C->x
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using the PLUS
// op as the "multiplier" (cij = dii + bij); int64 arithmetic.
GrB_Info GB_DxB__plus_int64
(
    GrB_Matrix C,                               // output
    const GrB_Matrix D, bool D_is_pattern,      // pattern: values ignored
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes results directly into C->x
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A
// and B, with PLUS applied where both entries exist (int64).
GrB_Info GB_AaddB__plus_int64
(
    GrB_Matrix C,                               // output
    const GrB_Matrix M,                         // optional mask (see template)
    const bool Mask_struct,                     // use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                        // C's hyperlist equals M's
    const int64_t *GB_RESTRICT C_to_M,          // vector mappings computed
    const int64_t *GB_RESTRICT C_to_A,          //   by the symbolic phase
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList, // parallel task schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B, applying PLUS to each coincident pair (int64).
GrB_Info GB_AemultB__plus_int64
(
    GrB_Matrix C,                               // output
    const GrB_Matrix M,                         // optional mask (see template)
    const bool Mask_struct,                     // use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,          // vector mappings computed
    const int64_t *GB_RESTRICT C_to_A,          //   by the symbolic phase
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList, // parallel task schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for p = 0..anz-1: apply the PLUS binary operator
// with the scalar x bound to its first argument (int64).
GrB_Info GB_bind1st__plus_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // compiled out via GB_DISABLE; caller uses the generic case
    return (GrB_NO_VALUE) ;
    #else
    const int64_t scalar = (*((const int64_t *) x_input)) ;
    const int64_t *Bx = (const int64_t *) Bx_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t p ;
    // no restrict qualifiers: Cx and Bx may legally alias
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = scalar + Bx [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for p = 0..anz-1: apply the PLUS binary operator
// with the scalar y bound to its second argument (int64).
GrB_Info GB_bind2nd__plus_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // compiled out via GB_DISABLE; caller uses the generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t *Ax = (const int64_t *) Ax_input ;
    const int64_t scalar = (*((const int64_t *) y_input)) ;
    int64_t p ;
    // no restrict qualifiers: Cx and Ax may legally alias
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = Ax [p] + scalar ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = x + A': transpose A and apply PLUS with the scalar x bound first
// (int64).  The transpose template is driven by macros, hence the
// GB_ATYPE redefinition dance around the #include.
GrB_Info GB_bind1st_tran__plus_int64
(
    GrB_Matrix C,                       // output
    const GB_void *x_input,             // scalar bound to the 1st argument
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // transpose workspace
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = A' + y: transpose A and apply PLUS with the scalar y bound second
// (int64).  GB_CAST_OP was redefined just above to add y to each aij.
GrB_Info GB_bind2nd_tran__plus_int64
(
    GrB_Matrix C,                       // output
    const GrB_Matrix A,
    const GB_void *y_input,             // scalar bound to the 2nd argument
    int64_t *GB_RESTRICT *Rowcounts,    // transpose workspace
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
4.collapse.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 5
/* Q1: Which iterations of the loops are executed by each thread */
/* when the collapse clause is used? */
/* Q2: Is the execution correct if we remove the collapse clause? */
/* Add the appropriate clause to make it correct. */
/*
 * Demonstrates the OpenMP collapse clause: the two nested N x N loops are
 * fused into one iteration space of N*N chunks shared among 8 threads.
 * Each iteration prints the executing thread id and its (row, col) pair.
 */
int main(void)
{
    omp_set_num_threads(8);

    /* Declaring the indices in the for-init keeps them private per
     * thread; with collapse(2) the runtime also privatizes the
     * collapsed iteration variables automatically. */
    #pragma omp parallel for collapse(2)
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            int tid = omp_get_thread_num();
            printf("(%d) Iter (%d %d)\n", tid, row, col);
        }
    }
    return 0;
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#ifndef SERIAL
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#else
#define cilk_spawn
#define cilk_sync
#define __cilkrts_accum_timing()
#define __cilkrts_set_pinning_info(n)
#define __cilkrts_unset_pinning_info()
#define __cilkrts_pin_top_level_frame_at_socket(n)
#define __cilkrts_init()
#define __cilkrts_get_nworkers() 1
#define __cilkrts_num_sockets() 1
#define __cilkrts_reset_timing()
#define cilk_for for
#endif
#include "npb-C.h"
#include "npbparams.h"
#include "numa_allocate.h"
#include <unistd.h>
#include <sched.h>
#include <numa.h>
#include <numaif.h>
#ifndef TIMING_COUNT
#define TIMING_COUNT 0
#endif
#if TIMING_COUNT
#include "ktiming.h"
clockmark_t begin, end;
uint64_t *elapsed;
#endif
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static double v[NA+1+1]; /* v[1:NA+1] */
static double aelt[NZ+1]; /* aelt[1:NZ] */
static double a[NZ+1]; /* a[1:NZ] */
static double x[NA+2+1]; /* x[1:NA+2] */
static double z[NA+2+1]; /* z[1:NA+2] */
static double p[NA+2+1]; /* p[1:NA+2] */
static double q[NA+2+1]; /* q[1:NA+2] */
static double r[NA+2+1]; /* r[1:NA+2] */
//static double w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static double amult;
static double tran;
/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
double a[], double p[], double q[], double r[],
//double w[],
double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, double rcond, int arow[], int acol[],
double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
int arow[], int acol[], double aelt[],
int firstrow, int lastrow,
double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(double x, int ipwr2);
const unsigned long BASE_CASE = 512;

/*
 * Fork/join dot product: returns
 *   init_sum + sum_{j in [i,e)} arr1[j] * arr2[j].
 *
 * Fix: the original passed init_sum into BOTH recursive halves, so a
 * nonzero seed was added once per leaf of the recursion.  Every caller
 * in this file happens to pass 0.0, which masked the bug; the seed is
 * now carried down the left spine only, so it is added exactly once.
 * The base-case loop index is also widened from int to unsigned long
 * to match the [i,e) bounds.
 */
double reduce_add_mul(double init_sum, double *arr1, double *arr2, unsigned long i, unsigned long e){
    if( e - i < BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            init_sum += arr1[j] * arr2[j];
        }
        return init_sum;
    }
    unsigned long mid = (i + e) / 2;
    double k = cilk_spawn reduce_add_mul(init_sum, arr1, arr2, i, mid);
    double l = reduce_add_mul(0.0, arr1, arr2, mid, e);
    cilk_sync;
    return k+l;
}
#ifndef NO_PIN
/*
 * res[j] = A[j] * alpha for all j in [i,e), computed with recursive
 * fork/join parallelism (only compiled when NUMA pinning is enabled).
 *
 * Fixes: the loop index is widened from int to unsigned long so large
 * ranges are not truncated, and the midpoint is computed only on the
 * recursive path instead of unconditionally.
 */
void map_mul_scalar(double *res, double *A, double alpha, unsigned long i, unsigned long e){
    if( e - i < BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            res[j] = A[j] * alpha;
        }
        return;
    }
    unsigned long mid = (i + e) / 2;
    cilk_spawn map_mul_scalar(res, A, alpha, i, mid);
    map_mul_scalar(res, A, alpha, mid, e);
    cilk_sync;
    return;
}
#endif
const unsigned long IDX_MUL_BASE_CASE = BASE_CASE;

/*
 * Indexed fork/join dot product (sparse row times dense vector):
 *   init_sum + sum_{j in [i,e)} arr1[j] * arr2[idx[j]].
 *
 * Fix: same seed bug as reduce_add_mul — init_sum used to flow into
 * both recursive halves and was therefore counted once per leaf.  All
 * existing callers pass 0.0, so results are unchanged; the seed now
 * propagates through the left half only.  Loop index widened to
 * unsigned long, and the midpoint is computed only when recursing.
 */
double reduce_add_mul_idx(double init_sum, double *arr1, double *arr2, int *idx, unsigned long i, unsigned long e){
    if( e - i < IDX_MUL_BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            init_sum += arr1[j] * arr2[idx[j]];
        }
        return init_sum;
    }
    unsigned long mid = (i + e) / 2;
    double k = cilk_spawn reduce_add_mul_idx(init_sum, arr1, arr2, idx, i, mid);
    double l = reduce_add_mul_idx(0.0, arr1, arr2, idx, mid, e);
    cilk_sync;
    return k+l;
}
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
#ifndef NO_PIN
static int sockets;
static int mem_pattern[] = {0, 0, 0, 0, 0, 0, 0, 0};
static int pin_pattern[] = {0, 0, 0, 0, 0, 0, 0, 0};
#endif
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
#ifndef NO_PIN
#define SET_PIN(N) __cilkrts_set_pinning_info(N)
#endif
/*
 * One complete CG benchmark run: builds the sparse matrix, optionally
 * copies all working arrays into NUMA-bound memory (#ifndef NO_PIN),
 * performs one untimed warm-up CG solve, then NITER timed solves of the
 * inverse power method, and finally verifies zeta and prints results.
 * argc/argv are unused; run indexes the elapsed[] slot when
 * TIMING_COUNT is enabled.
 */
int fakemain(int argc, char **argv, int run) {
int i, j, k, it;
int nthreads = 1;
double zeta;
double rnorm;
double norm_temp11;
double norm_temp12;
double t, mflops;
char class;
boolean verified;
double zeta_verify_value, epsilon;
firstrow = 1;
lastrow = NA;
firstcol = 1;
lastcol = NA;
/* Map the compile-time problem parameters to a NAS class letter and
 * its published reference eigenvalue; 'U' means unknown/no check. */
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
class = 'S';
zeta_verify_value = 8.5971775078648;
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
class = 'W';
zeta_verify_value = 10.362595087124;
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
class = 'A';
zeta_verify_value = 17.130235054029;
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
class = 'B';
zeta_verify_value = 22.712745482631;
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
class = 'C';
zeta_verify_value = 28.973605592845;
} else {
class = 'U';
}
//printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
// " - CG Benchmark\n");
//printf(" Size: %10d\n", NA);
//printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
tran = 314159265.0;
amult = 1220703125.0;
zeta = randlc( &tran, amult );
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
//printf("data gen\n");
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
//printf("data gen end\n");
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
cilk_for (unsigned long j = 1; j <= lastrow - firstrow + 1; j++) {
for (unsigned long k = rowstr[j]; k < rowstr[j+1]; k++) {
colidx[k] = colidx[k] - firstcol + 1;
}
}
//printf("end colidx\n");
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
cilk_for (unsigned long i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}
cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
zeta = 0.0;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
#ifndef NO_PIN
/* Allocate NUMA-distributed copies (*_numa) of every array the solver
 * touches, spread across the sockets according to mem_pattern, then
 * copy the freshly generated data into them.  NOTE(review): this local
 * `sockets` shadows the file-level static of the same name — confirm
 * that is intentional. */
int rNZ = rowstr[lastrow-firstrow+2];
int sockets = __cilkrts_num_sockets();
//colidx
unsigned long long colidx_pages = (rNZ+1)*sizeof(int)/getpagesize()+1;
int *colidx_numa = (int *) pattern_bind_memory_numa(colidx_pages, sockets, mem_pattern);
cilk_for(int i =0;i < rNZ+1; i++){
colidx_numa[i] = colidx[i];
}
//rowstr
unsigned long long rowstr_pages = (NA+1+1)*sizeof(int)/getpagesize()+1;
//printf("rowstr pages: %d\n", rowstr_pages);
int *rowstr_numa = (int *) pattern_bind_memory_numa(rowstr_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+1+1; i++){
rowstr_numa[i] = rowstr[i];
}
//x
unsigned long long x_pages = (NA+2+1)*sizeof(double)/getpagesize()+1;
double *x_numa = (double *) pattern_bind_memory_numa(x_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+2+1; i++){
x_numa[i] = x[i];
}
//z
unsigned long long z_pages = (NA+2+1)*sizeof(double)/getpagesize()+1;
double *z_numa = (double *) pattern_bind_memory_numa(z_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+2+1; i++){
z_numa[i] = z[i];
}
//a
unsigned long long a_pages = (rNZ+1)*sizeof(double)/getpagesize()+1;
double *a_numa = (double *) pattern_bind_memory_numa(a_pages, sockets, mem_pattern);
cilk_for(int i =0;i < rNZ+1; i++){
a_numa[i] = a[i];
}
//p
unsigned long long p_pages = (NA+2+1)*sizeof(double)/getpagesize()+1;
double *p_numa = (double *) pattern_bind_memory_numa(p_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+2+1; i++){
p_numa[i] = p[i];
}
//q
unsigned long long q_pages = (NA+2+1)*sizeof(double)/getpagesize()+1;
double *q_numa = (double *) pattern_bind_memory_numa(q_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+2+1; i++){
q_numa[i] = q[i];
}
//r
unsigned long long r_pages = (NA+2+1)*sizeof(double)/getpagesize()+1;
double *r_numa = (double *) pattern_bind_memory_numa(r_pages, sockets, mem_pattern);
cilk_for(int i =0;i < NA+2+1; i++){
r_numa[i] = r[i];
}
//unsigned long midt1 = (lastcol-firstcol+2)/2;
//unsigned long midt11 = (1 + midt1)/2;
//unsigned long midt12 = (midt1 + lastcol-firstcol+2)/2;
/* Per-socket chunk size used to slice every pinned parallel region. */
unsigned long unit = (lastcol-firstcol+2)/sockets;
#endif
for (it = 1; it <= 1; it++) {
//printf("in first iter\n");
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
#ifndef NO_PIN
conj_grad (colidx_numa, rowstr_numa, x_numa, z_numa, a_numa, p_numa, q_numa, r_numa,/* w,*/ &rnorm);
#else
conj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm);
#endif
//printf("end conj grad\n");
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#ifndef NO_PIN
/* Compute x.z and z.z with one spawn pair per socket, pinning each
 * pair to the socket that owns that slice (SET_PIN before spawn);
 * the last socket takes the remainder up to lastcol-firstcol+2. */
double norm_temp11_s[sockets];
double norm_temp12_s[sockets];
int start_spawn0 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
norm_temp11_s[i] = cilk_spawn reduce_add_mul(norm_temp11, x_numa, z_numa, start_spawn0, start_spawn0+unit); // @0
SET_PIN(pin_pattern[i+1]);
norm_temp12_s[i] = cilk_spawn reduce_add_mul(norm_temp12, z_numa, z_numa, start_spawn0, start_spawn0+unit); // @0
}else{
norm_temp11_s[i] = cilk_spawn reduce_add_mul(norm_temp11, x_numa, z_numa, start_spawn0, lastcol-firstcol+2); // @3
norm_temp12_s[i] = reduce_add_mul(norm_temp12, z_numa, z_numa, start_spawn0, lastcol-firstcol+2); // @3
__cilkrts_unset_pinning_info();
}
start_spawn0 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for(int i = 0; i < sockets; i ++){
norm_temp11 += norm_temp11_s[i];
norm_temp12 += norm_temp12_s[i];
}
#else
norm_temp11 = cilk_spawn reduce_add_mul(norm_temp11, x, z, 1, lastcol-firstcol+2);
norm_temp12 = reduce_add_mul(norm_temp12, z, z, 1, lastcol-firstcol+2);
cilk_sync;
#endif
norm_temp12 = 1.0 / sqrt( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) {
#ifndef NO_PIN
x_numa[j] = norm_temp12*z_numa[j];
#else
x[j] = norm_temp12*z[j];
#endif
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
cilk_for (unsigned long i = 1; i <= NA+1; i++) {
#ifndef NO_PIN
x_numa[i] = 1.0;
#else
x[i] = 1.0;
#endif
}
zeta = 0.0;
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
//printf("entering main iter\n");
__cilkrts_reset_timing();
timer_clear( 1 );
//timer_start( 1 );
#if TIMING_COUNT
begin = ktiming_getmark();
#endif
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
#ifndef NO_PIN
conj_grad(colidx_numa, rowstr_numa, x_numa, z_numa, a_numa, p_numa, q_numa, r_numa/*, w*/, &rnorm);
#else
conj_grad(colidx, rowstr, x, z, a, p, q, r/*, w*/, &rnorm);
#endif
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#ifndef NO_PIN
/* Same per-socket pinned reduction pattern as the warm-up pass. */
double norm_temp11_s[sockets];
double norm_temp12_s[sockets];
int start_spawn1 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
norm_temp11_s[i] = cilk_spawn reduce_add_mul(norm_temp11, x_numa, z_numa, start_spawn1, start_spawn1+unit); // @0
SET_PIN(pin_pattern[i+1]);
norm_temp12_s[i] = cilk_spawn reduce_add_mul(norm_temp12, z_numa, z_numa, start_spawn1, start_spawn1+unit); // @0
}else{
norm_temp11_s[i] = cilk_spawn reduce_add_mul(norm_temp11, x_numa, z_numa, start_spawn1, lastcol-firstcol+2); // @3
norm_temp12_s[i] = reduce_add_mul(norm_temp12, z_numa, z_numa, start_spawn1, lastcol-firstcol+2); // @3
__cilkrts_unset_pinning_info();
}
start_spawn1 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for(int i = 0; i < sockets; i ++){
norm_temp11 += norm_temp11_s[i];
norm_temp12 += norm_temp12_s[i];
}
#else
norm_temp11 = cilk_spawn reduce_add_mul(norm_temp11, x, z, 1, lastcol-firstcol+2);
norm_temp12 = reduce_add_mul(norm_temp12, z, z, 1, lastcol-firstcol+2);
cilk_sync;
#endif
norm_temp12 = 1.0 / sqrt( norm_temp12 );
zeta = SHIFT + 1.0 / norm_temp11;
/*
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
*/
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
/* cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) { */
/* x[j] = norm_temp12*z[j]; */
/* } */
#ifndef NO_PIN
int start_spawn2 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
cilk_spawn map_mul_scalar(x_numa, z_numa, norm_temp12, start_spawn2, start_spawn2+unit); // @0
}else{
map_mul_scalar(x_numa, z_numa, norm_temp12, start_spawn2, lastcol-firstcol+2); // @3
__cilkrts_unset_pinning_info();
}
start_spawn2 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
#else
cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}
#endif
}
/* end of main iter inv pow meth */
nthreads = __cilkrts_get_nworkers();
//timer_stop( 1 );
#if TIMING_COUNT
end = ktiming_getmark();
elapsed[run] = ktiming_diff_usec(&begin, &end);
#endif
__cilkrts_accum_timing();
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
//printf(" Benchmark completed\n");
/* Verification: accept zeta within 1e-10 of the class reference. */
epsilon = 1.0e-10;
if (class != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
//printf(" VERIFICATION SUCCESSFUL\n");
//printf(" Zeta is %20.12e\n", zeta);
//printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
//printf(" VERIFICATION FAILED\n");
//printf(" Zeta %20.12e\n", zeta);
//printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
//printf(" Problem size unknown\n");
//printf(" NO VERIFICATION PERFORMED\n");
}
/* Standard NPB CG MFLOPS formula; guarded against a zero timer. */
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
return 0;
}
const unsigned long INIT_BASE_CASE = BASE_CASE;

/*
 * CG start-up over the index range [i,e):
 *   q = 0, z = 0, r = x, p = r,
 * split recursively so each socket/worker initializes its own slice.
 *
 * Fixes: the loop index is widened from int to unsigned long to match
 * the bounds, and the midpoint is computed only on the recursive path.
 */
void initialize(double *q, double *z, double *r, double *p, double *x, unsigned long i, unsigned long e){
    if(e - i <= INIT_BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            q[j] = 0.0;
            z[j] = 0.0;
            r[j] = x[j];
            p[j] = r[j];
        }
        return;
    }
    unsigned long mid = (i + e) / 2;
    cilk_spawn initialize(q, z, r, p, x, i, mid);
    initialize(q, z, r, p, x, mid, e);
    cilk_sync;
}
/* cilk_for (unsigned long j = 1; j <= lastrow-firstrow+1; j++) { */
/* sum = 0.0; */
/* for (k = rowstr[j]; k < rowstr[j+1]; k++) { */
/* sum = sum + a[k]*p[colidx[k]]; */
/* } */
/* //w[j] = sum; */
/* q[j] = sum; */
/* } */
const unsigned long Q_BASE_CASE = 1;

/*
 * Sparse matrix-vector product q[i..e) = (A * p)[i..e): for each row j
 * in CSR form (rowstr/colidx/a), accumulate a[k] * p[colidx[k]] over
 * the row's nonzeros.  Rows are split recursively down to single rows
 * (Q_BASE_CASE == 1) because per-row nonzero counts vary widely.
 *
 * Cleanup: removed the empty profiling branches (which referenced NA
 * but had no effect) and the commented-out experiments, and widened
 * the row index from int to unsigned long to match the bounds.
 */
void compute_q(int *rowstr, int *colidx, double *p, double *q, double *a, unsigned long i, unsigned long e){
    if(e - i <= Q_BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            double sum = 0.0;
            for (int k = rowstr[j]; k < rowstr[j+1]; k++){
                sum += a[k] * p[colidx[k]];
            }
            q[j] = sum;
        }
        return;
    }
    unsigned long mid = (i+e)/2;
    cilk_spawn compute_q(rowstr, colidx, p, q, a, i, mid);
    compute_q(rowstr, colidx, p, q, a, mid, e);
    cilk_sync;
}
const unsigned long MAP_BASE_CASE = BASE_CASE;

/*
 * res[j] = A[j] + alpha * B[j] for all j in [i,e), via fork/join
 * recursion.  Used for the CG updates z += alpha*p, r -= alpha*q and
 * p = r + beta*p (res may alias A).
 *
 * Fixes: loop index widened from int to unsigned long, and the
 * midpoint is computed only on the recursive path.
 */
void map_add_mul(double *res, double *A, double *B, double alpha, unsigned long i, unsigned long e){
    if(e - i < MAP_BASE_CASE){
        for(unsigned long j = i; j < e; j++){
            res[j] = A[j] + B[j] * alpha;
        }
        return;
    }
    unsigned long mid = (i + e) / 2;
    cilk_spawn map_add_mul(res, A, B, alpha, i, mid);
    map_add_mul(res, A, B, alpha, mid, e);
    cilk_sync;
}
/*#ifdef NO_PIN
const unsigned long IDX_MUL_BASE_CASE = 512;
double reduce_add_mul_idx(double init_sum, double *arr1, double *arr2, int *idx, unsigned long i, unsigned long e){
unsigned long mid = (i + e) / 2;
//printf("here: e- i : %d\n", e-i);
if( e - i < IDX_MUL_BASE_CASE){
for(int j = i; j < e; j++){
init_sum += arr1[j] * arr2[idx[j]];
}
return init_sum;
}
double k = cilk_spawn reduce_add_mul_idx(init_sum, arr1, arr2, idx, i, mid);
double l = reduce_add_mul_idx(init_sum, arr1, arr2, idx, mid, e);
cilk_sync;
return k+l;
}
#endif*/
/* for (unsigned long j = 1; j <= lastrow-firstrow+1; j++) { */
/* d = 0.0; */
/* for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { */
/* d = d + a[k]*z[colidx[k]]; */
/* } */
/* r[j] = d; */
/* } */
const unsigned long NORM_BASE_CASE = 1;

/*
 * r[i..e) = (A * z)[i..e): one sparse dot product per CSR row, with
 * rows recursively split down to singletons for parallel execution.
 */
void compute_norm(int *rowstr, int *colidx, double *a, double *z, double *r, unsigned long i, unsigned long e){
    if(e - i > NORM_BASE_CASE){
        unsigned long half = (i + e) / 2;
        cilk_spawn compute_norm(rowstr, colidx, a, z, r, i, half);
        compute_norm(rowstr, colidx, a, z, r, half, e);
        cilk_sync;
        return;
    }
    for(unsigned long row = i; row < e; row++){
        r[row] = reduce_add_mul_idx(0.0, a, z, colidx, rowstr[row], rowstr[row+1]);
    }
}
#ifndef NO_PIN
/*
 * Parallel search over [i,e): within a leaf range, returns j-1 for the
 * first element arr[j] > num, or -1 when no element exceeds num.  When
 * the range is split, a hit in the left half takes precedence over the
 * right half.  (Only compiled when NUMA pinning is enabled.)
 */
long find_num(int *arr, int num, unsigned long i, unsigned long e){
    if(e - i >= BASE_CASE){
        unsigned long half = (i + e)/2;
        long left = cilk_spawn find_num(arr, num, i, half);
        long right = find_num(arr, num, half, e);
        cilk_sync;
        return (left != -1) ? left : right;
    }
    for(unsigned long j = i; j < e; j++){
        if (arr[j] > num){
            return (long)j - 1;
        }
    }
    return -1;
}
#endif
/* #pragma omp for reduction(+:sum) */
/* for (j = 1; j <= lastcol-firstcol+1; j++) { */
/* d = x[j] - r[j]; */
/* sum = sum + d*d; */
/* } */
/* //} //end omp parallel */
/* (*rnorm) = sqrt(sum); */
/* } */
const unsigned long COMPUTE_SUM_BASE_CASE = BASE_CASE;

/*
 * Squared-distance reduction: returns sum over [i,e) of
 * (x[j] - r[j])^2, computed by recursive halving with fork/join.
 * Used to form ||x - A.z|| at the end of conj_grad.
 */
double compute_sum(double *x, double *r, unsigned long i, unsigned long e){
    if(e - i >= COMPUTE_SUM_BASE_CASE){
        unsigned long half = (i + e) / 2;
        double lo = cilk_spawn compute_sum(x, r, i, half);
        double hi = compute_sum(x, r, half, e);
        cilk_sync;
        return lo + hi;
    }
    double acc = 0.0;
    for(unsigned long j = i; j < e; j++){
        double diff = x[j] - r[j];
        acc += diff * diff;
    }
    return acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*
 * One conjugate-gradient solve (cgitmax = 25 fixed iterations) of
 * A.z = x on the CSR matrix (colidx/rowstr/a), followed by an explicit
 * residual computation *rnorm = ||x - A.z||.  In the pinned build
 * (#ifndef NO_PIN) every parallel phase is sliced per socket with
 * SET_PIN before each spawn; the spawn/SET_PIN ordering is
 * deliberate and must not be reordered.
 */
static void conj_grad (
int colidx[], /* colidx[1:nzz] */
int rowstr[], /* rowstr[1:naa+1] */
double x[], /* x[*] */
double z[], /* z[*] */
double a[], /* a[1:nzz] */
double p[], /* p[*] */
double q[], /* q[*] */
double r[], /* r[*] */
//double w[], /* w[*] */
double *rnorm )
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c Floaging point arrays here are named as in NPB1 spec discussion of
c CG algorithm
c---------------------------------------------------------------------*/
{
static int callcount = 0;
double d, sum, rho, rho0, alpha, beta;
int i, j, k;
int cgit, cgitmax = 25;
rho = 0.0;
//#pragma omp parallel default(shared) private(j,sum) shared(rho,naa)
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
//{
/* cilk_for (unsigned long j = 1; j <= naa+1; j++) { */
/* q[j] = 0.0; */
/* z[j] = 0.0; */
/* r[j] = x[j]; */
/* p[j] = r[j]; */
/* //w[j] = 0.0; */
/* } */
#ifndef NO_PIN
/* q=0, z=0, r=x, p=r — one pinned slice per socket.  NOTE(review):
 * init_unit is hard-coded as a quarter of the range, independent of
 * the socket count queried just above — confirm this is intended for
 * sockets != 4. */
int sockets = __cilkrts_num_sockets();
unsigned long init_unit = (1 + naa+2)/4;
int start_spawn1 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
cilk_spawn initialize(q, z, r, p, x, start_spawn1, start_spawn1+init_unit); // @0
}else{
initialize(q, z, r, p, x, start_spawn1, naa+2); // @3
__cilkrts_unset_pinning_info();
}
start_spawn1 += init_unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
#else
initialize(q, z, r, p, x, 1, naa+2);
#endif
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
/* #pragma omp for reduction(+:rho) */
/* for (j = 1; j <= lastcol-firstcol+1; j++) { */
/* rho = rho + r[j]*r[j]; */
/* } */
#ifndef NO_PIN
/* Per-socket partial dot products, reduced serially after the sync.
 * NOTE(review): the last slice ends at lastrow-firstrow+2 while the
 * other reductions below use lastcol-firstcol+2; with square
 * partitions (lastrow == lastcol) these are equal — confirm. */
unsigned long unit = (lastcol-firstcol+2)/sockets;
int start_spawn2 = 1;
double rhos[sockets];
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
rhos[i] = cilk_spawn reduce_add_mul(rho, r, r, start_spawn2, start_spawn2+unit);
}else{
rhos[i] = reduce_add_mul(rho, r, r, start_spawn2, lastrow-firstrow+2);
__cilkrts_unset_pinning_info();
}
start_spawn2 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for (int i = 0; i < sockets; i++){
rho += rhos[i];
}
#else
rho = reduce_add_mul(rho, r, r, 1, lastcol-firstcol+2);
#endif
//}/* end omp parallel */
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
rho0 = rho;
d = 0.0;
rho = 0.0;
int last_ele = rowstr[lastrow-firstrow+2];
//int rmid1 = cilk_spawn find_num(rowstr, last_ele/4, 0, lastrow-firstrow+2);
//int rmid = cilk_spawn find_num(rowstr, last_ele/2, 0, lastrow-firstrow+2);
//int rmid2 = find_num(rowstr, last_ele/4*3, 0, lastrow-firstrow+2);
//cilk_sync;
//printf("rmid1: %d, rmid: %d, rmid2: %d, last: %d\n", rmid1, rmid, rmid2, lastrow-firstrow+2);
//printf("%d, %d, %d, %d\n", rowstr[rmid1] - rowstr[1], rowstr[rmid] - rowstr[rmid1], rowstr[rmid2] - rowstr[rmid], last_ele - rowstr[rmid2]);
//printf("prev %d, %d, %d, %d\n", rowstr[mid1] - rowstr[1], rowstr[mid] - rowstr[mid1], rowstr[mid2] - rowstr[mid], last_ele - rowstr[mid2]);
/* q = A.p (sparse matrix-vector product), pinned per socket. */
#ifndef NO_PIN
int start_spawn3 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
cilk_spawn compute_q(rowstr, colidx, p, q, a, start_spawn3, start_spawn3+unit);
}else{
compute_q(rowstr, colidx, p, q, a, start_spawn3, lastrow-firstrow+2);
__cilkrts_unset_pinning_info();
}
start_spawn3 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
#else
compute_q(rowstr, colidx, p, q, a, 1, lastrow-firstrow+2);
#endif
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
/*
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0;
}
*/
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
/* #pragma omp for reduction(+:d) */
/* for (j = 1; j <= lastcol-firstcol+1; j++) { */
/* d = d + p[j]*q[j]; */
/* } */
/* #pragma omp barrier */
#ifndef NO_PIN
double ds[sockets];
int start_spawn4 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
ds[i] = cilk_spawn reduce_add_mul(d, p, q, start_spawn4, start_spawn4+unit);
}else{
ds[i] = reduce_add_mul(d, p, q, start_spawn4, lastcol-firstcol+2);
__cilkrts_unset_pinning_info();
}
start_spawn4 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for(int i = 0; i < sockets; i++){
d+= ds[i];
}
#else
d = reduce_add_mul(d, p, q, 1, lastcol-firstcol+2);
#endif
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
//#pragma omp single
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
//#pragma omp for reduction(+:rho)
//remember we can actually combine the operations here
/* cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) { */
/* z[j] = z[j] + alpha*p[j]; */
/* r[j] = r[j] - alpha*q[j]; */
/* } */
#ifndef NO_PIN
/* Both vector updates run concurrently per socket; the single
 * cilk_sync after the #endif joins them all. */
int start_spawn5 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
cilk_spawn map_add_mul(z, z, p, alpha, start_spawn5, start_spawn5+unit); // @0
SET_PIN(pin_pattern[i+1]);
cilk_spawn map_add_mul(r, r, q, -alpha, start_spawn5, start_spawn5+unit); // @0
}else{
cilk_spawn map_add_mul(z, z, p, alpha, start_spawn5, lastcol-firstcol+2); // @3
map_add_mul(r, r, q, -alpha, start_spawn5, lastcol-firstcol+2); // @3
__cilkrts_unset_pinning_info();
}
start_spawn5 += unit;
}
SET_PIN(pin_pattern[0]);
#else
cilk_spawn map_add_mul(z, z, p, alpha, 1, lastcol-firstcol+2);
map_add_mul(r, r, q, -alpha, 1, lastcol-firstcol+2);
#endif
cilk_sync;
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
/*
#pragma omp for
for (j = 1; j <= lastcol-firstcol+1; j++) {*/
//rho = rho + r[j]*r[j];
//}
//#pragma omp barrier
#ifndef NO_PIN
double rhos[sockets];
int start_spawn6 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
rhos[i] = cilk_spawn reduce_add_mul(rho, r, r, start_spawn6, start_spawn6+unit);
}else{
rhos[i] = cilk_spawn reduce_add_mul(rho, r, r, start_spawn6, lastcol-firstcol+2);
__cilkrts_unset_pinning_info();
}
start_spawn6 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for(int i = 0; i < sockets; i++){
rho += rhos[i];
}
#else
rho = reduce_add_mul(rho, r, r, 1, lastcol-firstcol+2);
#endif
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
//#pragma omp single
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
//#pragma omp for nowait
/* cilk_for (unsigned long j = 1; j <= lastcol-firstcol+1; j++) { */
/* p[j] = r[j] + beta*p[j]; */
/* } */
#ifndef NO_PIN
int start_spawn7 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
cilk_spawn map_add_mul(p, r, p, beta, start_spawn7, start_spawn7+unit);
}else{
map_add_mul(p, r, p, beta, start_spawn7, lastcol-firstcol+2);
__cilkrts_unset_pinning_info();
}
start_spawn7 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
#else
map_add_mul(p, r, p, beta, 1, lastcol-firstcol+2);
#endif
callcount++;
//} /* end omp parallel */
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
sum = 0.0;
//#pragma omp parallel default(shared) private(j,d) shared(sum)
//{
//#pragma omp for //private(d, k)
/* for (unsigned long j = 1; j <= lastrow-firstrow+1; j++) { */
/* d = 0.0; */
/* for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { */
/* d = d + a[k]*z[colidx[k]]; */
/* } */
/* r[j] = d; */
/* } */
#ifndef NO_PIN
/* r = A.z, pinned per socket. */
int start_spawn8 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
cilk_spawn compute_norm(rowstr, colidx, a, z, r, start_spawn8, start_spawn8+unit);
}else{
compute_norm(rowstr, colidx, a, z, r, start_spawn8, lastrow-firstrow+2);
__cilkrts_unset_pinning_info();
}
start_spawn8 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
#else
compute_norm(rowstr, colidx, a, z, r, 1, lastrow-firstrow+2);
#endif
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
//todo: finish the reduction here
/* #pragma omp for reduction(+:sum) */
/* for (j = 1; j <= lastcol-firstcol+1; j++) { */
/* d = x[j] - r[j]; */
/* sum = sum + d*d; */
/* } */
/* //} //end omp parallel */
/* (*rnorm) = sqrt(sum); */
/* } */
#ifndef NO_PIN
/* sum = ||x - A.z||^2, reduced per socket then combined serially. */
double sums[sockets];
int start_spawn9 = 1;
for(int i = 0; i < sockets; i++){
if(i != sockets - 1){
SET_PIN(pin_pattern[i+1]);
sums[i] = cilk_spawn compute_sum(x, r, start_spawn9, start_spawn9+unit);
}else{
sums[i] = compute_sum(x, r, start_spawn9, lastcol-firstcol+2);
__cilkrts_unset_pinning_info();
}
start_spawn9 += unit;
}
SET_PIN(pin_pattern[0]);
cilk_sync;
for(int i = 0; i < sockets; i++){
sum += sums[i];
}
#else
sum = compute_sum(x, r, 1, lastcol-firstcol+2);
#endif
(*rnorm) = sqrt(sum);
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c  Build the CG benchmark matrix: random sparse outer products with
c  geometrically decreasing weights (condition number ~ rcond), plus a
c  shifted identity on the diagonal.  Element triples are accumulated in
c  (arow, acol, aelt) and assembled into CSR form by sparse().
c---------------------------------------------------------------------*/
static void makea(
    int n,
    int nz,
    double a[],     /* a[1:nz] */
    int colidx[],   /* colidx[1:nz] */
    int rowstr[],   /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[],     /* arow[1:nz] */
    int acol[],     /* acol[1:nz] */
    double aelt[],  /* aelt[1:nz] */
    double v[],     /* v[1:n+1] */
    int iv[],       /* iv[1:2*n+1] */
    double shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
/*--------------------------------------------------------------------
c      nonzer is approximately  (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
    double size, ratio, scale;
    int jcol;

    size = 1.0;
    /* per-outer-iteration decay so that after n steps size == rcond */
    ratio = pow(rcond, (1.0 / (double)n));
    nnza = 0;
/*---------------------------------------------------------------------
c  Initialize colidx(n+1 .. 2n) to zero.
c  Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
    //cilkfor
    for (unsigned long i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* random sparse vector; second half of colidx is the mark array */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* force component iouter to 0.5 so the diagonal is populated */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                /* scatter the scaled outer product v * v' into triples */
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            /* out of triple space: abort (messages elided) */
                            //printf("Space for matrix elements exceeded in"
                            //       " makea\n");
                            //printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            //printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }
/*---------------------------------------------------------------------
c       ... add the identity * rcond to the generated matrix to bound
c           the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                //printf("Space for matrix elements exceeded in makea\n");
                //printf("nnza, nzmax = %d, %d\n", nnza, nz);
                //printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;
        }
    }
/*---------------------------------------------------------------------
c       ... make the sparse matrix from list of elements with duplicates
c           (v and iv are used as  workspace)
c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/* Assemble a CSR matrix (a, colidx, rowstr) from the triple list
   (arow, acol, aelt), summing duplicate entries.  x, mark and nzloc are
   caller-provided workspace; arrays follow the 1-based Fortran layout. */
static void sparse(
    double a[],      /* a[1:*] */
    int colidx[],    /* colidx[1:*] */
    int rowstr[],    /* rowstr[1:*] */
    int n,
    int arow[],      /* arow[1:*] */
    int acol[],      /* acol[1:*] */
    double aelt[],   /* aelt[1:*] */
    int firstrow,
    int lastrow,
    double x[],      /* x[1:n] */
    boolean mark[],  /* mark[1:n] */
    int nzloc[],     /* nzloc[1:n] */
    int nnza)
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    double xi;

/*--------------------------------------------------------------------
c    how many rows of result
c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;

/*--------------------------------------------------------------------
c     ...count the number of triples in each row
c-------------------------------------------------------------------*/
    //cilk_for
    for (unsigned long j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;

    /* histogram of row occupancy, shifted one slot right (counts in j+1) */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }

    /* prefix sum: rowstr[j] = first slot of row j */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }

/*---------------------------------------------------------------------
c     ... rowstr(j) now is the location of the first nonzero
c           of row j of a
c---------------------------------------------------------------------*/

/*---------------------------------------------------------------------
c     ... preload data pages
c---------------------------------------------------------------------*/
    /* NOTE(review): this loop starts at j = 0 and reads rowstr[0], which
       the passes above never initialize (arrays here are 1-based); the
       reference NPB code iterates j = 1..nrows.  Confirm against the
       upstream source before changing. */
    //cilkfor
    for(unsigned long j = 0;j <= nrows-1;j++) {
        //cilkfor
        for(unsigned long k = rowstr[j];k <= rowstr[j+1]-1;k++)
            a[k] = 0.0;
    }

/*--------------------------------------------------------------------
c     ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;   /* bump the row cursor */
    }

/*--------------------------------------------------------------------
c       ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
    /* shift the cursors back down to restore row-start semantics */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;

/*--------------------------------------------------------------------
c       ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
    nza = 0;
    //cilkfor
    for (unsigned long i = 1; i <= n; i++) {
        x[i] = 0.0;
        mark[i] = FALSE;
    }

    jajp1 = rowstr[1];
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;

/*--------------------------------------------------------------------
c          ...loop over the jth row of a
c-------------------------------------------------------------------*/
        /* accumulate duplicates of row j into the dense scratch x[] */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;   /* remember touched columns */
            }
        }

/*--------------------------------------------------------------------
c          ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0;
            if (xi != 0.0) {   /* drop entries that cancelled to zero */
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];   /* compacted end of row j */
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    double v[],      /* v[1:*] */
    int iv[],        /* iv[1:*] */
    int nzloc[],     /* nzloc[1:n] */
    int mark[] )     /* mark[1:n] */
{
    /* Generate a sparse n-vector (v, iv) with nz distinct nonzero
       positions.  mark[] must be all zero on entry; it is restored to
       zero on exit via the nzloc[] list of touched slots, so it need
       not be re-cleared on each of the n calls. */
    int generated = 0;   /* nonzeros produced so far */
    int marked = 0;      /* positions recorded in nzloc[] */

    /* pow2: smallest power of two not less than n (always >= 2). */
    int pow2 = 2;
    while (pow2 < n) {
        pow2 = 2 * pow2;
    }

    while (generated < nz) {
        /* Draw the value first, then the position, in the same order as
           the reference code so the random stream stays identical. */
        double value = randlc(&tran, amult);
        double where = randlc(&tran, amult);

        /* portable integer in [1, pow2] */
        int pos = icnvrt(where, pow2) + 1;
        if (pos > n)
            continue;   /* out of range: redraw */

        if (mark[pos] == 0) {   /* position not used yet? */
            mark[pos] = 1;
            marked = marked + 1;
            nzloc[marked] = pos;
            generated = generated + 1;
            v[generated] = value;
            iv[generated] = pos;
        }
    }

    /* Reset only the marks we set. */
    for (int k = 1; k <= marked; k++)
        mark[nzloc[k]] = 0;
}
/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
/* Scale x in (0,1) by ipwr2 (a power of two) and truncate toward zero. */
static int icnvrt(double x, int ipwr2) {
    double scaled = ipwr2 * x;
    return (int)scaled;
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
static void vecset(
    int n,
    double v[],   /* v[1:*] */
    int iv[],     /* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    /* Set component i of the sparse vector (v, iv) to val.  If i is not
       among the *nzv stored positions, append it and bump *nzv. */
    int found = 0;
    for (int k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }
    if (!found) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
/* Benchmark driver: initializes the Cilk runtime, configures the
   socket pin/memory patterns (or an interleaved NUMA policy when
   pinning is disabled), runs fakemain() TIMING_COUNT times, and prints
   the collected timings.
   Fix: the memcpy calls previously copied 4*sizeof(int) from local
   pattern arrays that hold only 2 or 3 ints, reading past the end of
   the locals (undefined behavior); each copy now matches its source. */
int main(int argc, char **argv){
    __cilkrts_init();
#if TIMING_COUNT
    elapsed = malloc(TIMING_COUNT * sizeof(uint64_t));
    for(int i=0; i < TIMING_COUNT; i++){
        begin = 0;
        end = 0;
#else
    int i = 0;
#endif
#ifndef NO_PIN
    __cilkrts_pin_top_level_frame_at_socket(0);
    sockets = __cilkrts_num_sockets();
    if(sockets == 2){
        int mem_patternT[] = {0, 1};
        int pin_patternT[] = {0, 1};
        /* copy exactly as many entries as the source arrays hold */
        memcpy(mem_pattern, mem_patternT, 2*sizeof(int));
        memcpy(pin_pattern, pin_patternT, 2*sizeof(int));
    }else if(sockets == 3){
        int mem_patternT[] = {0, 2, 1};
        int pin_patternT[] = {0, 2, 1};
        memcpy(mem_pattern, mem_patternT, 3*sizeof(int));
        memcpy(pin_pattern, pin_patternT, 3*sizeof(int));
    }else if(sockets == 4){
        int mem_patternT[] = {0, 1, 2, 3};
        int pin_patternT[] = {0, 1, 2, 3};
        memcpy(mem_pattern, mem_patternT, 4*sizeof(int));
        memcpy(pin_pattern, pin_patternT, 4*sizeof(int));
    }
#else
    /* No pinning: interleave allocations across one NUMA node per
       CPUS_PER_SOCKET workers. */
    unsigned long nodemask = 0;
    for(int i = 0; i < __cilkrts_get_nworkers() / CPUS_PER_SOCKET; i++) {
        nodemask |= (1L << i);
    }
    set_mempolicy(MPOL_INTERLEAVE, &nodemask ,sizeof(nodemask)*8);
#endif
    fakemain(argc, argv, i);
#if TIMING_COUNT
    }
    print_runtime(elapsed, TIMING_COUNT);
#endif
    return 0;
}
|
lastpass_fmt_plug.c | /* LastPass offline cracker patch for JtR. Hacked together during January of 2013 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* All the hard work was done by Milen (author of hashkill).
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lastpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lastpass);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/aes.h>
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "lp"
#define FORMAT_NAME "LastPass offline"
#define FORMAT_TAG "$lp$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests lastpass_tests[] = {
{"$lp$hackme@mailinator.com$6f5d8cec3615fc9ac7ba2e0569bce4f5", "strongpassword"},
{"$lp$3$27c8641d7f5ab5985569d9d0b499b467", "123"},
{"$lp$ninechars$d09153108a89347da5c97a4a18f91345", "PassWord"},
{"$lp$anicocls$764b0f54528eb4a4c93aab1b18af28a5", ""},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int iterations;
int salt_length;
unsigned char salt[32];
} *cur_salt;
/* One-time format setup: scale keys-per-crypt by the OpenMP thread
   count and allocate the zeroed key/result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;   /* oversubscribe work units per thread */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init(); freed in reverse order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Syntax check for "$lp$<email>$<hash>": email at most 32 chars, hash
   exactly 32 hex digits with nothing trailing.  Returns 1 if valid. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* strtokm mutates its input, so work on a copy */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* email */
		goto err;
	if (strlen(p) > 32)
		goto err;
	/* NOTE(review): '*' never occurs in this format, so this grabs the
	   whole remainder of the string; any stray '$'-suffixed garbage is
	   then rejected by the hexlenl length check below.  Presumably
	   intentional — confirm against other JtR formats before changing. */
	if ((p = strtokm(NULL, "*")) == NULL) /* hash */
		goto err;
	if (hexlenl(p, &extra) != 32 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Extract the salt (the email field) from a validated ciphertext.
   Returns a pointer to a static struct, overwritten on each call. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$lp$" */
	p = strtokm(ctcopy, "$");
	/* valid() caps the email at 32 chars; length is carried separately,
	   so a missing NUL at exactly 32 bytes is harmless */
	strncpy((char*)cs.salt, p, 32);
	cs.salt_length = strlen((char*)p);
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the 32-hex-digit hash field into 16 raw bytes.  Returns a
   pointer to a static buffer, overwritten on each call. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	/* hash is the last '$'-separated field; valid() guarantees a '$' */
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Partial-hash accessors for the cracker's lookup tables: each returns
   the low bits (per PH_MASK_n) of the first 32-bit word of the result. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Hash all queued candidates: derive a 32-byte key with PBKDF2-SHA256
   (500 iterations, salt = email) and AES-256-ECB-encrypt the fixed
   16-byte block "lastpass rocks\x02\x02" with it; the ciphertext is
   what gets compared against the stored binary. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		/* Without _OPENMP the block body runs once with index == 0;
		   MAX_KEYS_PER_CRYPT then matches count by construction. */
		AES_KEY akey;
#ifdef SIMD_COEF_32
		/* SIMD path: derive SSE_GROUP_SZ_SHA256 keys in parallel lanes */
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = key[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, 500, &(x.poutc), 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memset(&akey, 0, sizeof(AES_KEY));
			AES_set_encrypt_key((unsigned char*)key[i], 256, &akey);
			AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT);
		}
#else
		/* scalar path: one key at a time */
		unsigned char key[32];
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->salt_length, 500, key, 32, 0);
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_encrypt_key((unsigned char*)key, 256, &akey);
		AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[index], &akey, AES_ENCRYPT);
#endif
	}
	return count;
}
/* Quick scan over all computed hashes: compares only the first
   ARCH_SIZE bytes as a cheap filter (standard JtR idiom); cmp_one()
   then confirms the full BINARY_SIZE bytes for the surviving index. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full comparison of the 16-byte binary for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing beyond cmp_one to verify for this format. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void lastpass_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate for this slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring the LastPass methods into John's framework:
   the first sub-struct holds the static parameters (sizes, flags, test
   vectors), the second the method table. */
struct fmt_main fmt_lastpass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		lastpass_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		lastpass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
DRACC_OMP_021_Large_Data_Copy_no.c | /*
Matrix Addition with large matrices, and copying them whole.
All Matrices are to big to fit on the accelerator whole resulting in a segmentation fault. Executes in host fallback.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <omp.h>   /* omp_is_initial_device() is called in add() */
#define C 51200L
int64_t *a;
int64_t *b;
int64_t *c;
/* Fill a and b with ones and clear c across the whole C x C matrix.
   The row/column pair (i, j) maps to the flat index j + i*C, which
   covers 0 .. C*C-1 exactly once, so a single flat loop suffices. */
int init(){
	for(int64_t k = 0; k < C*C; k++){
		a[k] = 1;
		b[k] = 1;
		c[k] = 0;
	}
	return 0;
}
/* Offload c = a + b to device 0, mapping all three C*C matrices whole.
   Per this benchmark's header, the arrays are deliberately too large to
   fit on the accelerator, so execution is expected to fall back to the
   host.  NOTE(review): a target region containing a bare `teams`
   (around the printf) plus a second `teams distribute` looks
   non-conforming OpenMP (a teams construct must be the sole strictly
   nested construct) — presumably intentional for this test suite;
   confirm before changing. */
int add(){
	#pragma omp target map(to:a[0:C*C],b[0:C*C]) map(from:c[0:C*C]) device(0)
	{
	#pragma omp teams
	printf("Executed on host: %s\n",omp_is_initial_device() ? "true" : "false");
	#pragma omp teams distribute parallel for collapse(2)
	for(int64_t i=0; i<C; i++){
		for(int64_t j=0; j<C; j++){
			c[j+i*C]=b[j+i*C] + a[j+i*C];
		}
	}
	}
	return 0;
}
/* Verify the addition: every element of c should be 2.  Reports
   whether any element deviates (i.e. a memory-access issue occurred). */
int check(){
	int64_t bad = 0;
	for(int64_t k = 0; k < C*C; k++){
		if(c[k] != 2){
			bad++;
		}
	}
	printf("Memory Access Issue visible: %s\n", bad != 0 ? "true" : "false");
	return 0;
}
/* Allocate the three C x C matrices, run the init/add/check pipeline,
   and release the memory.
   Fix: each allocation is ~20 GB (51200^2 * 8 bytes); the original
   never checked malloc, so a failed allocation made init() write
   through NULL.  Failures now abort with a message instead. */
int main(){
	a = (int64_t *) malloc(C*C*sizeof(int64_t));
	b = (int64_t *) malloc(C*C*sizeof(int64_t));
	c = (int64_t *) malloc(C*C*sizeof(int64_t));
	if (a == NULL || b == NULL || c == NULL) {
		fprintf(stderr, "Allocation of %ld x %ld matrices failed\n", C, C);
		free(a);
		free(b);
		free(c);
		return EXIT_FAILURE;
	}
	init();
	add();
	check();
	free(a);
	free(b);
	free(c);
	return 0;
}
GB_unaryop__ainv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_uint64
// op(A') function: GB_tran__ainv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply AINV (additive inverse) elementwise,
   casting each uint64_t entry of Ax to int8_t and negating it.
   Auto-generated file — the logic lives in the GB_* macros above. */
GrB_Info GB_unop__ainv_int8_uint64
(
    int8_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   /* operator compiled out via GxB_NO_* flags */
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   /* Cx[p] = -(int8_t) Ax[p] */
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, casting and applying AINV as above.
   The actual kernel is the shared template GB_unaryop_transpose.c,
   instantiated here with this file's GB_* macros. */
GrB_Info GB_tran__ainv_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000

/* A point in the complex plane. */
typedef struct {
	double r;
	double i;
}complex_t;

/* Iterate z <- z^2 + c starting from z = c and return the number of
   steps taken before |z|^2 exceeds 4 (i.e. z escapes the Mandelbrot
   set).  Points that survive MXITER steps return MXITER. */
int testpoint(complex_t c){
	complex_t z = c;
	for(int step = 0; step < MXITER; ++step){
		/* compute the new real part first; the imaginary update must
		   still see the old z.r */
		double re = (z.r*z.r) - (z.i*z.i) + c.r;
		z.i = z.r*z.i*2. + c.i;
		z.r = re;
		if((z.r*z.r + z.i*z.i) > 4.0)
			return step;
	}
	return MXITER;
}
// Evaluate the Mandelbrot escape count at every point of an Nre x Nim
// grid spanning the rectangle [cmin, cmax] of the complex plane, and
// store the counts in `count` (row-major, Nre columns per row).
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
	const double dr = (cmax.r - cmin.r)/(Nre - 1);
	const double di = (cmax.i - cmin.i)/(Nim - 1);

	// Rows are independent, so split the outer loop across threads;
	// loop-local variables are automatically private.
	#pragma omp parallel for
	for(int row = 0; row < Nim; ++row){
		for(int col = 0; col < Nre; ++col){
			complex_t c;
			c.r = cmin.r + dr*col;
			c.i = cmin.i + di*row;
			count[col + row*Nre] = testpoint(c);
		}
	}
}
/* Driver: parse grid size and thread count, render the Mandelbrot set,
   and write it as a PNG.
   Fixes: missing argc check (argv[1..3] were read unconditionally),
   the parsed Nthreads was ignored in favor of re-reading argv[argc-1],
   malloc/fopen results were unchecked, and the `return 0` after
   exit(0) was unreachable. */
int main(int argc, char **argv){
	// usage: ./mandelbrot 4096 4096 1
	if (argc < 4) {
		fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
		return 1;
	}
	int Nre = atoi(argv[1]);
	int Nim = atoi(argv[2]);
	int Nthreads = atoi(argv[3]);
	if (Nre < 2 || Nim < 2 || Nthreads < 1) {
		// Nre/Nim feed (N-1) divisors in mandelbrot(); 1 would divide by zero
		fprintf(stderr, "Nre and Nim must be >= 2, Nthreads >= 1\n");
		return 1;
	}

	// Q2b: set the number of OpenMP threads
	omp_set_num_threads(Nthreads);

	// storage for the iteration counts
	float *count = (float*) malloc(Nre*Nim*sizeof(float));
	if (count == NULL) {
		fprintf(stderr, "failed to allocate %dx%d count array\n", Nre, Nim);
		return 1;
	}

	// Parameters for a bounding box for "c" that generates an interesting image
	const float centRe = -.759856, centIm= .125547;
	const float diam  = 0.151579;

	complex_t cmin;
	complex_t cmax;
	cmin.r = centRe - 0.5*diam;
	cmax.r = centRe + 0.5*diam;
	cmin.i = centIm - 0.5*diam;
	cmax.i = centIm + 0.5*diam;

	// Q2d: wall-clock the computation with the OpenMP timer
	double start = omp_get_wtime();
	mandelbrot(Nre, Nim, cmin, cmax, count);
	double end = omp_get_wtime();

	printf("elapsed = %g\n", end-start);

	// output mandelbrot to png format image
	FILE *fp = fopen("mandelbrot.png", "w");
	if (fp == NULL) {
		perror("mandelbrot.png");
		free(count);
		return 1;
	}
	write_hot_png(fp, Nre, Nim, count, 0, 80);
	// NOTE(review): not closing fp here mirrors the original, which
	// relied on process exit to flush; confirm whether write_hot_png
	// closes the stream itself before adding an fclose.
	free(count);
	return 0;
}
|
sph_compute.h | #ifndef SPH_COMPUTE_H
#define SPH_COMPUTE_H
#pragma omp declare simd
double w_bspline_3d(double r, double h);
double w_bspline_3d_constant(double h);
#pragma omp declare simd
double w_bspline_3d_simd(double q);
#pragma omp declare simd
double dwdq_bspline_3d_simd(double q);
#define w_bspline_3d_LUT(q) w_bspline_3d_LUT_128(q)
#pragma omp declare simd
double w_bspline_3d_LUT_1024(double q);
#pragma omp declare simd
double w_bspline_3d_LUT_128(double q);
#pragma omp declare simd
double w_bspline_3d_LUT_32(double q);
int compute_density_3d(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_innerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_loopswapped(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_load_ballanced(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_symmetrical_load_ballance(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_symmetrical_lb_branching(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_2d(int N, double h, SPHparticle *lsph, linkedListBox *box);
#endif
|
argon2_fmt_plug.c | /*
* This software is Copyright (c) 2016 Agnieszka Bielec <bielecagnieszka8 at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* merged argon2d and argon2i into a single format file. JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_argon2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_argon2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "argon2.h"
#include "argon2_core.h"
#include "argon2_encoding.h"
#include "memdbg.h"
#define FORMAT_LABEL "argon2"
#define FORMAT_NAME ""
#define FORMAT_TAG_d "$argon2d$"
#define FORMAT_TAG_i "$argon2i$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG_d)-1)
#if defined(__XOP__)
#define ALGORITHM_NAME "Blake2 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Blake2 AVX"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME "Blake2 SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Blake2 SSE2"
#else
#define ALGORITHM_NAME "Blake2"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 100 //only in john
#define BINARY_SIZE 256 //only in john
#define BINARY_ALIGN 1
#define SALT_SIZE 64 //only in john
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define OMP_SCALE 16
#ifdef _OPENMP
#define THREAD_NUMBER omp_get_thread_num()
#else
#define THREAD_NUMBER 1
#endif
static struct fmt_tests tests[] = {
{"$argon2d$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$w9w3s5/zV8+PcAZlJhnTCOE+vBkZssmZf6jOq3dKv50","password"},
{"$argon2i$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$N59QwnpxDQZRj1/cO6bqm408dD6Z2Z9LKYpwFJSPVKA","password"},
{"$argon2d$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$zMrTcOAOUje6UqObRVh84Pe1K6gumcDqqGzRM0ILzYmj","sacrificed"},
{"$argon2i$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$1l4kAwUdAApoCbFH7ghBEf7bsdrOQzE4axIJ3PV0Ncrd","sacrificed"},
{"$argon2d$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$TLSTPihIo+5F67Y1vJdfWdB9","blessed_dead"},
{"$argon2i$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$vvjDVog22A5x9eljmB+2yC8y","blessed_dead"},
{"$argon2d$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$yw93eMxC8REPAwbQ0e/q43jR9+RI9HI/DHP75uzm7tQfjU734oaI3dzcMWjYjHzVQD+J4+MG+7oyD8dN/PtnmPCZs+UZ67E+rkXJ/wTvY4WgXgAdGtJRrAGxhy4rD7d5G+dCpqhrog","death_dying"},
{"$argon2i$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$K7unxwO5aeuZCpnIJ06FMCRKod3eRg8oIRzQrK3E6mGbyqlTvvl47jeDWq/5drF1COJkEF9Ty7FWXJZHa+vqlf2YZGp/4qSlAvKmdtJ/6JZU32iQItzMRwcfujHE+PBjbL5uz4966A","death_dying"},
{NULL}
};
struct argon2_salt {
uint32_t t_cost, m_cost, lanes;
uint32_t hash_size;
uint32_t salt_length;
char salt[SALT_SIZE];
argon2_type type;
};
static struct argon2_salt saved_salt;
static region_t * memory;
static void **pseudo_rands;
static char *saved_key;
static int threads;
static size_t saved_mem_size;
static uint32_t saved_segment_length;
static unsigned char *crypted;
static void *get_salt(char *ciphertext);
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
threads=omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#else
threads=1;
#endif
saved_key =
malloc(self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1));
memset(saved_key, 0,
self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1));
crypted = malloc(self->params.max_keys_per_crypt * (BINARY_SIZE));
memset(crypted, 0, self->params.max_keys_per_crypt * (BINARY_SIZE));
memory=malloc(threads*sizeof(region_t));
pseudo_rands=malloc(threads*sizeof(void*));
for (i=0;i<threads;i++)
{
init_region_t(&memory[i]);
pseudo_rands[i]=NULL;
}
saved_mem_size=0;
saved_segment_length=0;
}
/* Release everything allocated by init() and set_salt(): key/result
   buffers plus the per-thread Argon2 scratch memory. */
static void done(void)
{
	int i;
	free(saved_key);
	free(crypted);
	for (i=0;i<threads;i++)
	{
		free_region_t(&memory[i]);
		free(pseudo_rands[i]);   /* free(NULL) is a no-op */
	}
	free(memory);
	free(pseudo_rands);
}
/* Pretty-print a byte count with a binary unit suffix (B, kB, MB, ...). */
static void print_memory(double bytes)
{
	const char units[] = "\0kMGT";
	int unit = 0;
	for (; bytes >= 1024; unit++)
		bytes /= 1024;
	printf("memory per hash : %.2lf %cB\n", bytes, units[unit]);
}
/* In verbose mode, report the Argon2 memory cost per hash: from the
   built-in test vectors when self-testing (db == NULL), otherwise from
   the loaded salts.  NOTE(review): `printed` is never assigned, so the
   guard never latches and the banner can print on every reset call —
   presumably it was meant to be set to 1 after printing; confirm. */
static void reset(struct db_main *db)
{
	static int printed=0;
	if (!printed && options.verbosity > VERB_LEGACY)
	{
		int i;
		uint32_t m_cost, prev_m_cost;
		m_cost=prev_m_cost=0;
		if (!db) {
			/* self-test: scan the static test vectors */
			for (i = 0; tests[i].ciphertext; i++)
			{
				struct argon2_salt *salt;
				salt=get_salt(tests[i].ciphertext);
				m_cost = MAX(m_cost, salt->m_cost);
				if (i==0)
				{
					printf("\n");
					prev_m_cost=m_cost;
					print_memory(sizeof(block)*m_cost);
				}
			}
			/* later vectors used more memory: also report the max */
			if (prev_m_cost!=m_cost)
			{
				printf("max ");
				print_memory(sizeof(block)*m_cost);
			}
		} else {
			/* real run: scan the loaded salt list */
			struct db_salt *salts = db->salts;
			while (salts != NULL) {
				struct argon2_salt * salt=salts->salt;
				m_cost = MAX(m_cost, salt->m_cost);
				salts = salts->next;
			}
			printf("\n");
			print_memory(sizeof(block)*m_cost);
		}
	}
}
/* Prepare an argon2_context for decoding: point its output and salt at
   static scratch buffers sized to this format's maxima.
   NOTE: the static buffers are shared by every context initialized
   here, so callers must consume ctx->out/ctx->salt before the next
   ctx_init-based decode — fine for the single-threaded parse paths
   (valid/get_binary/get_salt) that use it. */
static void ctx_init(argon2_context *ctx)
{
	//size_t maxadlen = ctx->adlen;
	//size_t maxsaltlen = ctx->saltlen;
	//size_t maxoutlen = ctx->outlen;
	static uint8_t out[BINARY_SIZE];
	static uint8_t salt[SALT_SIZE];
	ctx->adlen=0;
	ctx->saltlen=SALT_SIZE;
	ctx->outlen=BINARY_SIZE;
	ctx->out=out;
	ctx->salt=salt;
}
/* Accept a hash if it carries an $argon2d$/$argon2i$ tag, decodes
   cleanly with the reference parser, and yields at least 8 output
   bytes (shorter digests are too weak to compare meaningfully). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	argon2_context ctx;
	int res;

	ctx_init(&ctx);

	if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN))
		res=argon2_decode_string(&ctx, ciphertext, Argon2_d);
	else if (!strncmp(ciphertext, FORMAT_TAG_i, FORMAT_TAG_LEN))
		res=argon2_decode_string(&ctx, ciphertext, Argon2_i);
	else
		return 0;

	if (res!=ARGON2_OK || ctx.outlen < 8)
		return 0;

	return 1;
}
/* Store a candidate at its slot in the flat saved_key buffer,
   truncated to PLAINTEXT_LENGTH bytes and always NUL-terminated. */
static void set_key(char *key, int index)
{
	char *dst = saved_key + index * (PLAINTEXT_LENGTH + 1);
	int n = (int)strlen(key);
	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(dst, key, n);
	dst[n] = 0;
}
/* Return a pointer to the stored candidate password at slot 'index'. */
static char *get_key(int index)
{
	return &saved_key[index * (PLAINTEXT_LENGTH + 1)];
}
/* Extract the reference digest from a ciphertext into a zero-padded
 * static buffer of BINARY_SIZE bytes. */
static void *get_binary(char *ciphertext)
{
	static char out[BINARY_SIZE];
	argon2_context ctx;
	int is_d = !strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN);

	ctx_init(&ctx);
	argon2_decode_string(&ctx, ciphertext, is_d ? Argon2_d : Argon2_i);
	memset(out, 0, BINARY_SIZE);
	memcpy(out, ctx.out, ctx.outlen);
	return out;
}
/*
 * Parse a ciphertext into the static struct argon2_salt: variant, salt
 * bytes, time/memory costs, lane count and digest length.
 */
static void *get_salt(char *ciphertext)
{
	static struct argon2_salt salt;
	argon2_context ctx;
	int is_d;

	memset(&salt, 0, sizeof(salt));
	ctx_init(&ctx);
	is_d = !strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN);
	salt.type = is_d ? Argon2_d : Argon2_i;
	argon2_decode_string(&ctx, ciphertext, is_d ? Argon2_d : Argon2_i);
	salt.salt_length = ctx.saltlen;
	salt.m_cost = ctx.m_cost;
	salt.t_cost = ctx.t_cost;
	salt.lanes = ctx.lanes;
	salt.hash_size = ctx.outlen;
	memcpy(salt.salt, ctx.salt, ctx.saltlen);
	return (void *)&salt;
}
/*
 * Install a salt and (re)size per-thread work buffers.  Buffers only
 * grow: they are reallocated when the new salt needs more block memory or
 * a longer pseudo-rand segment than any salt seen so far this run.
 */
static void set_salt(void *salt)
{
uint32_t i;
size_t mem_size;
uint32_t segment_length, memory_blocks;
memcpy(&saved_salt,salt,sizeof(struct argon2_salt));
mem_size=sizeof(block)*saved_salt.m_cost;
memory_blocks = saved_salt.m_cost;
/* Enforce the minimum of 2 * ARGON2_SYNC_POINTS blocks per lane. */
if (memory_blocks < 2 * ARGON2_SYNC_POINTS * saved_salt.lanes) {
memory_blocks = 2 * ARGON2_SYNC_POINTS * saved_salt.lanes;
}
segment_length = memory_blocks / (saved_salt.lanes * ARGON2_SYNC_POINTS);
if (mem_size>saved_mem_size)
{
/* Grow each thread's aligned block-memory region. */
if (saved_mem_size>0)
for (i=0;i<threads;i++)
free_region_t(&memory[i]);
for (i=0;i<threads;i++)
alloc_region_t(&memory[i],mem_size);
saved_mem_size=mem_size;
}
if (segment_length>saved_segment_length)
{
/* Grow each thread's pseudo-rand scratch array. */
if (saved_segment_length>0)
for (i=0;i<threads;i++)
free(pseudo_rands[i]);
for (i=0;i<threads;i++)
/* NOTE(review): malloc result is unchecked -- confirm OOM handling. */
pseudo_rands[i]=malloc(sizeof(uint64_t) * segment_length);
saved_segment_length=segment_length;
}
}
/* Return 1 if any of the 'count' computed digests matches 'binary'. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (memcmp(binary, crypted + i * BINARY_SIZE, saved_salt.hash_size) == 0)
			return 1;
	return 0;
}
/* Compare 'binary' against the digest computed for candidate 'index'. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypted + index * BINARY_SIZE, saved_salt.hash_size) == 0;
}
/*
 * cmp_one() already compared the full hash_size bytes of the digest, so
 * there is nothing left to verify here.
 */
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Compute Argon2 for every queued candidate.  Each OpenMP thread reuses
 * its own pre-allocated block memory and pseudo-rand scratch buffer
 * (indexed via THREAD_NUMBER), sized earlier in set_salt().  Results go
 * to crypted[i * BINARY_SIZE]; hash_size bytes are compared later.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
argon2_hash(saved_salt.t_cost, saved_salt.m_cost, saved_salt.lanes, saved_key + i * (PLAINTEXT_LENGTH + 1), strlen(saved_key + i * (PLAINTEXT_LENGTH + 1)), saved_salt.salt,
saved_salt.salt_length, crypted + i * BINARY_SIZE, saved_salt.hash_size, 0, 0, saved_salt.type, ARGON2_VERSION_NUMBER, memory[THREAD_NUMBER%threads].aligned, pseudo_rands[THREAD_NUMBER%threads]);
}
return count;
}
static int get_hash_0(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xF;
}
static int get_hash_1(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xFF;
}
static int get_hash_2(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xFFF;
}
static int get_hash_3(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xFFFF;
}
static int get_hash_4(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xFFFFF;
}
static int get_hash_5(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0xFFFFFF;
}
static int get_hash_6(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & 0x7FFFFFF;
}
/*
 * Hash the raw salt bytes into a SALT_HASH_SIZE-bucket table index using
 * a shift-and-add rolling hash, folding overflow back in as it appears.
 */
static int salt_hash(void *_salt)
{
	struct argon2_salt *salt = (struct argon2_salt *) _salt;
	unsigned int hash = 0;
	int i;

	for (i = 0; i < salt->salt_length; i++) {
		hash = (hash << 1) + (unsigned char) salt->salt[i];
		if (hash >> SALT_HASH_LOG) {
			hash ^= hash >> SALT_HASH_LOG;
			hash &= (SALT_HASH_SIZE - 1);
		}
	}
	hash ^= hash >> SALT_HASH_LOG;
	hash &= (SALT_HASH_SIZE - 1);
	return hash;
}
#if FMT_MAIN_VERSION > 11
/* Tunable-cost callbacks: expose t_cost, m_cost, lanes and variant. */
static unsigned int tunable_cost_t(void *_salt)
{
	return ((struct argon2_salt *) _salt)->t_cost;
}
static unsigned int tunable_cost_m(void *_salt)
{
	return ((struct argon2_salt *) _salt)->m_cost;
}
static unsigned int tunable_cost_p(void *_salt)
{
	return ((struct argon2_salt *) _salt)->lanes;
}
static unsigned int tunable_cost_type(void *_salt)
{
	return (int) ((struct argon2_salt *) _salt)->type;
}
#endif
/* Format descriptor wiring the Argon2 callbacks into JtR's format API. */
struct fmt_main fmt_argon2 = {
{ /* parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
sizeof(struct argon2_salt),
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
{ /* tunable cost labels */
"t",
"m",
"p",
"type [0:Argon2d 1:Argon2i]"
},
{0},
tests
}, { /* methods */
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ /* tunable cost accessors */
tunable_cost_t,
tunable_cost_m,
tunable_cost_p,
tunable_cost_type,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
ParallelFor.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <cstdint>
#include <vector>
#include "open3d/core/Device.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Parallel.h"
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include "open3d/core/CUDAUtils.h"
#endif
namespace open3d {
namespace core {
#ifdef __CUDACC__
static constexpr int64_t OPEN3D_PARFOR_BLOCK = 128;
static constexpr int64_t OPEN3D_PARFOR_THREAD = 4;
/// Device kernel: each block handles a contiguous tile of
/// block_size * thread_size work items; each thread processes thread_size
/// items spaced block_size apart within the tile, calling f(idx) for every
/// in-range index.  (NOTE(review): despite the original "grid-stride"
/// wording, blocks do not loop over further tiles; the grid is sized to
/// cover n exactly.)
template <int64_t block_size, int64_t thread_size, typename func_t>
__global__ void __ElementWiseKernel(int64_t n, func_t f) {
int64_t items_per_block = block_size * thread_size;
int64_t idx = blockIdx.x * items_per_block + threadIdx.x;
#pragma unroll
for (int64_t i = 0; i < thread_size; ++i) {
if (idx < n) {
f(idx);
// Advance to this thread's next item within the block's tile.
idx += block_size;
}
}
}
/// Run a function in parallel with CUDA.
///
/// \param device The device for the ParallelFor to run on.
/// \param n The number of workloads.
/// \param func The function to be executed in parallel. The function should
/// take an int64_t workload index and returns void, i.e., `void func(int64_t)`.
///
/// \note This is optimized for uniform work items, i.e. where each call to \p
/// func takes the same time.
/// \note If you use a lambda function, capture only the required variables
/// instead of all to prevent accidental race conditions. If you want the
/// kernel to be used on both CPU and CUDA, capture the variables by value.
template <typename func_t>
void ParallelFor(const Device& device, int64_t n, const func_t& func) {
// Guard: this overload is compiled for CUDA only.
if (device.GetType() != Device::DeviceType::CUDA) {
utility::LogError("ParallelFor for CUDA cannot run on device {}.",
device.ToString());
}
if (n == 0) {
return;
}
CUDAScopedDevice scoped_device(device);
// Each block covers BLOCK * THREAD items; round the grid size up so the
// whole range [0, n) is covered.
int64_t items_per_block = OPEN3D_PARFOR_BLOCK * OPEN3D_PARFOR_THREAD;
int64_t grid_size = (n + items_per_block - 1) / items_per_block;
__ElementWiseKernel<OPEN3D_PARFOR_BLOCK, OPEN3D_PARFOR_THREAD>
<<<grid_size, OPEN3D_PARFOR_BLOCK, 0, core::cuda::GetStream()>>>(
n, func);
OPEN3D_GET_LAST_CUDA_ERROR("ParallelFor failed.");
}
#else
/// \brief Run a function in parallel on CPU.
///
/// \param device The device for the ParallelFor to run on.
/// \param n The number of workloads.
/// \param func The function to be executed in parallel. The function should
/// take an int64_t workload index and returns void, i.e., `void func(int64_t)`.
///
/// \note This is optimized for uniform work items, i.e. where each call to \p
/// func takes the same time.
/// \note If you use a lambda function, capture only the required variables
/// instead of all to prevent accidental race conditions. If you want the
/// kernel to be used on both CPU and CUDA, capture the variables by value.
// CPU overload: dispatch func(0..n-1) across an OpenMP thread team.
// Errors out if handed a non-CPU device; a non-positive n is a no-op.
template <typename func_t>
void ParallelFor(const Device& device, int64_t n, const func_t& func) {
    if (device.GetType() != Device::DeviceType::CPU) {
        utility::LogError("ParallelFor for CPU cannot run on device {}.",
                          device.ToString());
    }
    if (n <= 0) {
        return;
    }
#pragma omp parallel for num_threads(utility::EstimateMaxThreads())
    for (int64_t workload_idx = 0; workload_idx < n; ++workload_idx) {
        func(workload_idx);
    }
}
#endif
} // namespace core
} // namespace open3d
|
clique_cmap.h | #pragma once
#include "cmap.h"
#include "graph.h"
#include "emb_list.h"
// Triangle counting: for every vertex v0, sum |N(v0) /\ N(v1)| over its
// neighbors v1.  (cmaps is unused here; kept for a uniform interface.)
void cmap_3clique(Graph &g, uint64_t &total,
                  std::vector<cmap8_t> &cmaps) {
  std::cout << "3-clique using cmap\n";
  uint64_t num = 0;
  #pragma omp parallel for schedule(dynamic, 1) reduction(+:num)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    auto adj0 = g.N(v0);
    uint64_t cnt = 0;
    for (auto v1 : adj0)
      cnt += intersection_num(adj0, g.N(v1));
    num += cnt;
  }
  total = num;
}
// Count 4-cliques with a per-thread color map.  For each v0, its
// neighbors are marked level 1; for each neighbor v1 the common
// neighbors are promoted to level 2; any neighbor v3 of a level-2 vertex
// v2 that is itself level 2 closes a 4-clique.  Marks are rolled back
// level by level before moving on.
void cmap_4clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "4-clique using cmap\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
uint64_t local_counter = 0;
auto y0 = g.N(v0);
#if 0
// Reference set-intersection version (disabled).
for (auto v1 : y0) {
auto y1 = g.N(v1);
auto y0y1 = y0 & y1;
for (auto v2 : y0y1)
counter += intersection_num(y0y1, g.N(v2));
}
#else
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
// Promote N(v0) /\ N(v1) to level 2 and collect it.
for (auto u : y1) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
for (auto v2 : y0y1) {
for (auto v3 : g.N(v2)) {
// if (cmap.get(v3) == 2)
// local_counter ++;
local_counter += (cmap.get(v3) == 2);
}
}
// Undo the level-2 marks for the next v1.
for (auto u : y0y1) cmap.set(u, 1);
}
for (auto u : y0) cmap.set(u, 0);
#endif
counter += local_counter;
}
total = counter;
}
// Count 5-cliques with a per-thread color map.  Same scheme as the
// 4-clique kernel with one more level: vertices in N(v0) are level 1,
// N(v0)/\N(v1) level 2, N(v0)/\N(v1)/\N(v2) level 3; a neighbor v4 of a
// level-3 vertex v3 that is itself level 3 closes a 5-clique.
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "5-clique using cmap\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
auto y0 = g.N(v0);
uint64_t local_counter = 0;
#if 0
// Reference set-intersection version (disabled).
for (auto v1 : y0) {
auto y1 = g.N(v1);
auto y0y1 = y0 & y1;
for (auto v2 : y0y1) {
auto y2 = g.N(v2);
auto y0y1y2 = y0y1 & y2;
for (auto v3 : y0y1y2)
local_counter += intersection_num(y0y1y2, g.N(v3));
}
}
#else
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
for (auto u : y1) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
for (auto v2 : y0y1) {
VertexSet y0y1y2;
y0y1y2.clear();
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
y0y1y2.add(u);
}
}
for (auto v3 : y0y1y2) {
for (auto v4 : g.N(v3)) {
// if (cmap.get(v4) == 3)
// local_counter ++;
local_counter += (cmap.get(v4) == 3);
}
}
// Roll back level-3 marks before the next v2.
for (auto u : y0y1y2) cmap.set(u, 2);
}
// Roll back level-2 marks before the next v1.
for (auto u : y0y1) cmap.set(u, 1);
}
for (auto u : y0) cmap.set(u, 0);
#endif
counter += local_counter;
}
total = counter;
}
// ad-hoc 4-clique
// Variant of the 4-clique kernel that stores each level's frontier in a
// per-thread embedding list instead of a VertexSet.  'counter' is the
// OpenMP reduction variable, so incrementing it directly is safe.
void cmap_4clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps,
std::vector<EmbList> &emb_lists) {
std::cout << "4-clique using cmap and embedding list\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0 ++) {
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto &emb_list = emb_lists[tid];
auto y0 = g.N(v0);
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
// Level-2 frontier: N(v0) /\ N(v1).
emb_list.set_size(2, 0);
for (auto u : g.N(v1)) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
emb_list.add_emb(2, u);
}
}
for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
auto v2 = emb_list.get_vertex(2, emb_id);
for (auto v3 : g.N(v2)) {
// if (cmap.get(v3) == 2)
// counter ++;
counter += (cmap.get(v3) == 2);
}
}
// Demote the level-2 frontier before the next v1.
for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
auto v = emb_list.get_vertex(2, emb_id);
cmap.set(v, 1);
}
}
for (auto u : y0) cmap.set(u, 0);
}
total = counter;
}
// 5-clique counting using per-thread embedding lists for the level-2 and
// level-3 frontiers; otherwise identical in structure to the VertexSet
// version above it in this file.
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps,
std::vector<EmbList> &emb_lists) {
std::cout << "5-clique using cmap and embedding list\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0 ++) {
uint64_t local_counter = 0;
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto &emb_list = emb_lists[tid];
auto y0 = g.N(v0);
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
// Level-2 frontier: N(v0) /\ N(v1).
emb_list.set_size(2, 0);
for (auto u : g.N(v1)) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
emb_list.add_emb(2, u);
}
}
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v2 = emb_list.get_vertex(2, id2);
// Level-3 frontier: common neighbors of v0, v1 and v2.
emb_list.set_size(3, 0);
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
emb_list.add_emb(3, u);
}
}
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v3 = emb_list.get_vertex(3, id3);
for (auto v4 : g.N(v3)) {
// if (cmap.get(v4) == 3)
// local_counter ++;
local_counter += (cmap.get(v4) == 3);
}
}
// Demote the level-3 frontier before the next v2.
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v = emb_list.get_vertex(3, id3);
cmap.set(v, 2);
}
}
// Demote the level-2 frontier before the next v1.
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v = emb_list.get_vertex(2, id2);
cmap.set(v, 1);
}
}
for (auto u : y0) cmap.set(u, 0);
counter += local_counter;
}
total = counter;
}
// Dispatch to the k-clique kernel.  Note: any k other than 3 or 4 falls
// through to the 5-clique implementation (matching the original chain).
void cmap_kclique(Graph &g, unsigned k, uint64_t &total,
                  std::vector<cmap8_t> &cmaps) {
  switch (k) {
    case 3:
      cmap_3clique(g, total, cmaps);
      break;
    case 4:
      cmap_4clique(g, total, cmaps);
      break;
    default:
      cmap_5clique(g, total, cmaps);
      break;
  }
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Compression tags as stored in PSD channel data. */
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes from the PSD file header (values 5 and 6 are not handled). */
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* One channel record from a layer header: channel type and data size. */
typedef struct _ChannelInfo
{
short
type;

size_t
size;
} ChannelInfo;
/* A layer's opacity mask: its image, placement and mask flags. */
typedef struct _MaskInfo
{
Image
*image;

RectangleInfo
page;

unsigned char
background,
flags;
} MaskInfo;
/* Everything parsed from one PSD layer record. */
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];

char
blendkey[4];

Image
*image;

MaskInfo
mask;

Quantum
opacity;

RectangleInfo
page;

size_t
offset_x,
offset_y;

unsigned char
clipping,
flags,
name[257],
visible;

unsigned short
channels;

StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* Return MagickTrue when the blob begins with the PSD signature "8BPS". */
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Map an ImageMagick composite operator onto the 4-byte PSD blend-mode
 * key.  For LSB-endian images the key is returned with its bytes
 * reversed (hence the mirrored spellings); unknown operators fall back
 * to "norm" (normal blending).
 */
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
 * Undo the blend-with-white that semi-transparent PSD pixels carry:
 * for every pixel with 0 < alpha < 1, each non-alpha channel is
 * recomputed as (q - (1-alpha)*QuantumRange) / alpha.  Only applies to
 * blended-alpha sRGB images; the "psd:alpha-unblend" option can turn it
 * off.  Returns MagickFalse if any row could not be read or synced.
 */
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;

MagickBooleanType
status;

ssize_t
y;

if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;

register ssize_t
x;

if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;

register ssize_t
i;

/* gamma is the normalized alpha of this pixel. */
gamma=QuantumScale*GetPixelAlpha(image, q);
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/* Map a PSD compression tag onto the closest MagickCore CompressionType. */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
 * Fold a layer's global opacity into each pixel's alpha (revert ==
 * MagickFalse scales alpha by opacity/QuantumRange; revert == MagickTrue
 * divides it back out).  A fully opaque layer is a no-op; images without
 * blended alpha get an opaque alpha channel first.  Returns MagickFalse
 * if any row could not be read or synced.
 */
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;

ssize_t
y;

if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;

register ssize_t
x;

if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
 * Multiply (or, with revert, divide) each pixel's alpha by the intensity
 * of the layer's opacity mask.  The mask is first composited over a
 * solid 'background' clone of the image so areas the mask does not cover
 * get the mask's background value.  No-op for images without an alpha
 * trait.  Returns MagickFalse on any pixel-cache failure.
 */
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;

MagickBooleanType
status;

PixelInfo
color;

ssize_t
y;

if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
/* Position the mask using its page offset relative to the layer. */
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;

register Quantum
*p;

register ssize_t
x;

if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;

alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
 * Stash a layer's opacity mask in the image registry under a random key
 * so it can be re-attached when the image is written back out; the key
 * is recorded in the "psd:opacity-mask" artifact, with the mask's
 * background value appended as the key's last character.
 */
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;

RandomInfo
*random_info;

StringInfo
*key_info;

if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,2+1);
key=(char *) GetStringInfoDatum(key_info);
/* NOTE(review): key[8]/key[9] are written although GetRandomKey was asked
   for only 2+1 bytes -- presumably safe only because the datum is
   over-allocated; confirm against the StringInfo implementation. */
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
/*
 * Decode a PackBits-style RLE stream (as used by PSD channel data) into
 * raw pixel bytes.  Each control byte is either a literal count
 * (value+1 bytes follow verbatim), a no-op (128), or a run count
 * (257-value repeats of the next byte).  Sub-byte depths (1/2/4) are
 * expanded so every output byte holds one sample; at depth 1 each bit
 * becomes 0 or 255 with set bits mapping to 0.  Returns the number of
 * output bytes written, which may fall short of number_pixels if either
 * buffer is exhausted first.
 */
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
/* Stop (returning bytes written so far) when the input runs dry. */
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
/* Stop when the output buffer would overflow. */
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;

register ssize_t
i,
j;

size_t
length;

ssize_t
packets;

packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue; /* 128 is a no-op control byte */
if (length > 128)
{
/* Run packet: replicate the next input byte 257-length times. */
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
/* Literal packet: copy the next length+1 input bytes. */
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
/* Free every layer's image, mask image and metadata, then the array. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    layer;

  for (layer=0; layer < number_layers; layer++)
  {
    LayerInfo
      *info;

    info=layer_info+layer;
    if (info->image != (Image *) NULL)
      info->image=DestroyImage(info->image);
    if (info->mask.image != (Image *) NULL)
      info->mask.image=DestroyImage(info->mask.image);
    if (info->info != (StringInfo *) NULL)
      info->info=DestroyStringInfo(info->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/* Bytes per stored sample: 2 for palettes over 256 colors, otherwise
 * 4/2/1 according to the image bit depth. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
/* Read a section length from the blob: version-1 files store 32-bit
 * lengths, other versions 64-bit. */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/* Bytes per scanline; 1-bit rows are padded up to whole bytes. */
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    columns;

  columns=image->columns;
  if (image->depth == 1)
    columns=(columns+7)/8;
  return(columns*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, for logging.
  */
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;
  MagickBooleanType
    status;
  /*
    Negate every channel except alpha (PSD stores CMYK samples inverted),
    restoring the caller's channel mask before returning.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length,ExceptionInfo *exception)
{
  const unsigned char
    *p;
  ssize_t
    offset;
  StringInfo
    *profile;
  unsigned char
    name_length;
  unsigned int
    count;
  unsigned short
    id,
    short_sans;
  /*
    Walk the "8BIM" image resource blocks: capture the raw bytes as an
    "8bim" profile and interpret the few resources we understand
    (0x03ed resolution info, 0x0421 merged-image presence flag).
    Returns the profile, or NULL when the data is too short or the
    profile cannot be allocated.
  */
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);  /* allocation failed; nothing to parse into */
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    /* Every resource block starts with the "8BIM" signature. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* Pascal-style name is padded so length byte + data total is even. */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject blocks whose payload would run outside the buffer. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];
        unsigned short
          resolution;
        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 of the payload flags a merged image. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;
  /*
    Byte-reverse a key/identifier read from a little-endian file; on
    big-endian data the bytes are already in the expected order.
  */
  if (image->endian == MSBEndian)
    return;
  for (q=p+length-1; p < q; ++p, --q)
  {
    char
      swap;
    swap=*p;
    *p=*q;
    *q=swap;
  }
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded channel sample into the pixel at q.  `type` is the
    PSD channel id: 0..4 select color channels in mode order, -1 is the
    transparency channel, and values below -1 belong to layer masks.
  */
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;
      Quantum
        index;
      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      /* Channel 0 carries the colormap index; any other type updates the
         alpha of the mapped color. */
      if (type == 0)
        SetPixelIndex(image,index,q);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha when enabled. */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* Fifth channel only maps to alpha when the colorspace does not
         already consume four color channels. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;
  register const unsigned char
    *p;
  register Quantum
    *q;
  register ssize_t
    x;
  size_t
    packet_size;
  /*
    Push one decoded row of raw channel samples into the pixel cache.
    Samples are 1, 2 or 4 bytes wide depending on the packet size;
    1-bit images additionally unpack eight pixels per byte.
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit float samples, scaled into the quantum range. */
          MagickFloatType
            nibble;
          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;
        /* 1-bit data: expand up to eight pixels from this byte; a set
           bit maps to 0 (black), a clear bit to QuantumRange. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* The outer loop increments x once more; compensate unless the
           row ended exactly on this byte. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    row_size;
  ssize_t
    count,
    row;
  unsigned char
    *row_buffer;
  /*
    Decode an uncompressed channel: one blob read per image row, each
    row handed straight to ReadPSDChannelPixels.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_buffer=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_buffer));
  if (row_buffer == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row_buffer,0,row_size*sizeof(*row_buffer));
  status=MagickTrue;
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    count=ReadBlob(image,row_size,row_buffer);
    if (count != (ssize_t) row_size)
      {
        /* Short read: the blob ended before the channel did. */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,row,type,row_buffer,exception);
    if (status == MagickFalse)
      break;
  }
  row_buffer=(unsigned char *) RelinquishMagickMemory(row_buffer);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;
  ssize_t
    i;
  /*
    Read the table of per-row compressed byte counts that precedes RLE
    channel data: 16-bit counts in PSD files, 32-bit in PSB files.
    Returns NULL on allocation failure.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  /*
    Decode a PackBits (RLE) compressed channel.  `sizes` holds the
    compressed byte count of each row; every row is read into
    compact_pixels, expanded into pixels, and pushed to the pixel cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the compressed buffer for the largest row; reject absurd values
    so a corrupt size table cannot force a huge allocation.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* Allocate in terms of *compact_pixels (was sizeof(*pixels); same size,
     but misleading when reading the code). */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* Depth 1 is signaled to the decoder with a sentinel value. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  register unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  /*
    Decode a ZIP-compressed channel: read compact_size bytes from the
    blob, inflate them into a full channel buffer, optionally undo the
    per-row delta prediction, then push each row into the pixel cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* A compressed size larger than the file itself is corrupt. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each sample was stored as the
        difference from its left neighbor.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              /* 16-bit samples: add with carry across the two bytes. */
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          /*
          else if (packet_size == 4)
            {
              TODO: Figure out what to do there.
            }
          */
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;
  /*
    Read one channel of a layer.  Mask channels (type < -1) are decoded
    into a separate grayscale image stored in layer_info->mask; all
    other channels decode directly into the layer image.
  */
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;
      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte compression
             marker already consumed by the caller). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  /* Remember the channel start so we can seek past the declared size no
     matter how much the decoder actually consumed. */
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];
  MagickBooleanType
    status;
  PSDCompressionType
    compression;
  ssize_t
    j;
  /*
    Read every channel of one layer into layer_info->image, then apply
    the layer's opacity and (optionally) its opacity mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel starts with its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;
      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;
  register ssize_t
    i;
  /*
    Verify the layer supplies every channel its color mode requires.
    A bitmask of required channels is built from min_channels and each
    declared channel clears its bit; the layer is valid when the mask is
    empty, or when only an (extra) alpha channel remains unmatched.
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;
    type=layer_info->channel_info[i].type;
    /* Indexed images must declare the index channel first. */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    switch (type)
    {
      case 0: channel_type&=~RedChannel; break;
      case 1: channel_type&=~GreenChannel; break;
      case 2: channel_type&=~BlueChannel; break;
      case 3: channel_type&=~BlackChannel; break;
      default: break;
    }
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;
  ssize_t
    j;
  /*
    Compact the layer array (dropping entries without an image), chain
    the remaining layer images into image's next/previous list, and
    release the layer_info array.  Takes ownership of layer_info.
  */
  for (i=0; i < number_layers; i++)
  {
    if (layer_info[i].image == (Image *) NULL)
      {
        /* Shift the tail down over the empty slot and re-test index i. */
        for (j=i; j < number_layers - 1; j++)
          layer_info[j] = layer_info[j+1];
        number_layers--;
        i--;
      }
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* Doubly link the layer images and stamp each with its page geometry. */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Decide whether the scene selection allows skipping the layer at
    `index`.  Layers are never skipped when there is no merged image to
    fall back on, or when no scene range was requested.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index < image_info->scene) ||
      (index > image_info->scene+image_info->number_scenes-1))
    return(MagickTrue);
  return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    base_channels;
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so enable it whenever the file declares
    more channels than the color mode itself needs.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode: base_channels=2; break;
    case RGBMode: base_channels=3; break;
    case CMYKMode: base_channels=4; break;
    default: return;
  }
  if (psd_info->channels > base_channels)
    image->alpha_trait=BlendPixelTrait;
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];
  LayerInfo
    *layer_info;
  MagickSizeType
    size;
  MagickBooleanType
    status;
  register ssize_t
    i;
  ssize_t
    count,
    index,
    j,
    number_layers;
  /*
    Parse the layer and mask information section: layer records (geometry,
    channels, blend data, masks, names, additional info), then the channel
    pixel data of every layer.  Layers are attached to `image` as a linked
    list via AttachPSDLayers.
  */
  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        {
          CheckMergedImageAlpha(psd_info,image);
          return(MagickTrue);
        }
      else
        {
          /* 16/32-bit layer data is stored in Lr16/Lr32 sub-blocks. */
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            {
              CheckMergedImageAlpha(psd_info,image);
              return(MagickTrue);
            }
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 set means the layer is hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Unless flag 0x01 is set, mask offsets are relative to the
               layer rather than the canvas. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The Pascal name is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of the record is "additional info" kept as a
           blob for later re-use (psd:additional-info profile). */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;
            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read the channel pixel data of every kept layer; skipped or
    empty layers have their channel data discarded to stay in sync.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Policy-gated entry point for reading the layer section.  When the
    security policy denies coder read rights for "PSD", the layers are
    silently left unread and MagickTrue is returned.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the flattened (merged) composite raster that follows the layer and
  mask section of a PSD file.  Only Raw and RLE compression are handled
  here; any other codec raises a TypeWarning and MagickFalse is returned.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /*
    When the caller requested specific scenes other than the first, the
    merged composite is not needed.
  */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /*
        RLE data is preceded by a table of per-row byte counts: one entry
        per row, for every channel.
      */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /*
      In a two-channel image the second channel is treated as alpha
      (channel type -1).
    */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* PSD stores CMYK samples inverted; NegateCMYK() undoes that here. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() decodes an Adobe Photoshop PSD (version 1) or PSB
  (version 2) blob: header, optional colormap, image resource blocks,
  layer/mask section, and the precombined (merged) raster.  Returns the
  first image in the decoded list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  PSD data is always big-endian; "8BPS" signature,
    version 1 (PSD) or 2 (PSB).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version 1 (classic PSD) caps dimensions at 30000 pixels per side. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Derive colorspace and minimum required channel count from the PSD mode.
  */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
    else
      if (psd_info.mode == IndexedMode)
        psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all red bytes, then all green, then
            all blue (planar, not interleaved).
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block: a sequence of 8BIM records, parsed into an
        optional profile by ParseImageResourceBlocks().
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      /* Only the composite was requested; don't decode individual layers. */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /*
    No usable merged image and no layers decoded yet: rewind and force a
    full layer read as a fallback.
  */
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the composite by flattening the decoded layers. */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every frame that was kept. */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Write a row-offset value at the width implied by the PSD version:
  16 bits for version 1 (PSD), 32 bits for version 2 (PSB).
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
/*
  Backpatch a previously reserved offset slot: remember the current blob
  position, seek to the slot, write `size' at the version-dependent width,
  then restore the original position.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Write a section-length field: 32 bits in version 1 (PSD) files, 64 bits
  in version 2 (PSB) files.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
/*
  Backpatch a previously reserved size slot (see SetPSDSize for the
  version-dependent width), then restore the current blob position.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Compress `length' bytes of `pixels' into `compact_pixels' using PackBits
  run-length encoding and return the number of bytes emitted (including the
  trailing 0x80 end-of-data marker).  A header byte n in [0,127] announces
  n+1 literal bytes; a header of (256-count)+1 announces a replicate run of
  `count' copies of the next byte.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for assembling one literal run (max 127+1 bytes). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single remaining byte: emit as a one-byte literal run. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two remaining bytes: emit as a two-byte literal run. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: replicate-run header for count 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise a three-byte literal run. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          /* Stop before the tail so the small cases above can finish. */
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  Emit the two-byte compression marker for a channel-data section.  For RLE
  we additionally reserve one per-row offset slot for every channel; these
  placeholders are backpatched later (via WritePSDOffset) once the packed
  row lengths are known.  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  Write one channel of `next_image' to the blob using the requested
  compression (Raw, RLE/PackBits, or — when zlib is available — Zip).
  When `separate' is set the channel carries its own compression marker and
  size slot.  Returns the number of data bytes written (0 on failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker; size_offset points at the slots. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD supports 8- or 16-bit samples here; clamp deeper images to 16. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit monochrome data is stored inverted in PSD. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Backpatch this row's byte count into its reserved offset slot. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  Allocate a scratch buffer large enough for a Packbits-compressed
  scanline.  RLE output can exceed the input in the worst case, so a
  generous (9*columns)+1 packets are reserved; packets are two bytes wide
  when the image depth exceeds 8 bits.  Returns NULL (with an exception
  recorded) on allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  bytes_per_packet=image->depth > 8UL ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  Write all channels of `next_image' (index/gray or R,G,B[,K], plus alpha
  and an optional opacity mask) via WritePSDChannel().  When `separate' is
  set each channel carries its own compression header and size slot;
  otherwise one shared compression header precedes the interleaved channel
  data and per-channel row offsets advance by `offset_length'.  Returns the
  total number of bytes written (0 on allocation failure).
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* image_info->compression, when set, overrides the image's own choice. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Derive the channel count: 1 (gray/indexed), 3 (RGB) or 4 (CMYK),
         plus one for alpha when present. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's block of reserved row-offset slots. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Colormapped image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing (and again
             after, below, to restore the in-memory image). */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Restore the in-memory CMYK samples inverted above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Optional opacity mask stashed in the image registry by the layer
         writer; written as one extra grayscale channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
register ssize_t
i;
/*
Max length is 255.
*/
count=0;
length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
/*
  Emit the 8BIM resolution image-resource record (id 0x03ED): horizontal
  and vertical resolution as 16.16 fixed-point values, each followed by its
  display-unit codes (1 = pixels/inch, 2 = pixels/cm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units != PixelsPerCentimeterResolution)
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;  /* pixels per inch */
    }
  else
    {
      /* Convert cm-based resolution to the inch-based fixed point. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;  /* pixels per centimeter */
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resolution resource id */
  (void) WriteBlobMSBShort(image,0);       /* empty Pascal name */
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  Write a channel-info record: the channel id followed by a zero length
  placeholder that is backpatched (via WritePSDSize) once the channel data
  has been written.  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  bytes_written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
/*
  Scan an 8BIM resource-block profile and excise the ICC profile record
  (resource id 0x040F) in place, shrinking the StringInfo accordingly.
  Each record is: "8BIM" signature, 16-bit id, Pascal name (read here as
  two throwaway shorts), 32-bit data length, then the data padded to an
  even size.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one record header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;  /* start of the current record */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Whole record: header (12 bytes) + even-padded data. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* record data is padded to an even byte count */
  }
}
/*
  Scan an 8BIM resource-block profile and excise the resolution record
  (resource id 0x03ED) in place, shrinking the StringInfo accordingly.
  Record layout is the same as in RemoveICCProfileFromResourceBlock().
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one record header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of the current record */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data length rounded to an even size */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining records over the excised one. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* record data is padded to an even byte count */
  }
}
/*
  Filter the "psd:additional-info" profile according to the image option of
  the same name: "all" keeps the profile untouched, anything other than
  "selective" drops it entirely, and "selective" compacts the profile in
  place so only records whose 4-byte key appears in the whitelist below
  survive.  Returns the (possibly truncated) profile, or NULL when nothing
  remains or the data is malformed.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other setting (including unset): discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used as the number of bytes kept so far */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit data length of this record. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt record */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Compact in place: shift the tail over the rejected record
           (header included), then re-examine from the same position. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  /* NOTE(review): the profile re-attached here is `info' (the original
     pointer) rather than the truncated `profile' — appears intentional
     since both reference the same compacted buffer; confirm upstream. */
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() serializes the PSD "Layer info" section for all
  images in the list: first the per-layer record headers (bounds, channel
  table, blend mode, opacity, mask record, name, additional info), then the
  per-layer channel pixel data, and finally back-patches the section size.

  On return *layers_size (if non-NULL) receives the unpadded byte count
  written.  Returns MagickTrue on success, MagickFalse if any layer's
  channel data fails to write.

  NOTE(review): the record layout mirrors Adobe's PSD file format spec;
  field order below is load-bearing and must not be rearranged.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* The first image in the list is the merged composite; layers start at
     the second image (or the image itself when there is only one). */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Remember where the section length field goes; it is re-written at the
     end once the total size is known. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count tells readers that the first alpha channel of
     the merged result holds the transparency data (PSD spec). */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Pass 1: write one layer record per image (no pixel data yet).
  */
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* Opacity mask image was stashed in the registry by the reader. */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Channel sizes are unknown until pass 2; remember where they live so
       WritePSDChannels can patch them later. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1); /* -1: transparency */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2); /* -2: layer mask */
    /* Blend-mode signature, byte-swapped on little-endian blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255); /* fully opaque by default */
    size+=WriteBlobByte(image,0); /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0); /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a synthetic name, e.g. "L1". */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: Pascal name padded to 4, mask record, and any
       preserved additional-info blocks. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0); /* no layer mask record */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask coordinates are stored relative to the canvas. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20); /* mask record length */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0)); /* flags: disabled */
        size+=WriteBlobMSBShort(image,0); /* padding */
      }
    size+=WriteBlobLong(image,0); /* blending-ranges length (none) */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Pass 2: now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size (padded to an even byte count) back at the
    section-length slot recorded earlier.
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
/*
  WritePSDLayers() is the public entry point for writing the PSD layer
  section.  It enforces the coder security policy first: when writing PSD
  layers is not authorized, the layers are silently skipped and MagickTrue
  is returned so the rest of the file can still be produced.
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception));
}
/*
  WritePSDImage() writes a complete PSD/PSB file: file header, color mode
  data (palette), image resources (resolution, 8BIM, ICC), the layer and
  mask section, and finally the merged composite pixel data.

  Returns MagickTrue on success.  The blob is closed before returning.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Bytes per pixel per row-run: 3 or 6 (16-bit), +1/+2 for alpha. */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* Version 2 selects the large-document PSB format; PSD proper is
     limited to 30000x30000 pixels. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* Anything not explicitly CMYK is written as sRGB/indexed. */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  /*
    Color mode data: a 768-byte planar RGB palette for indexed images,
    empty otherwise.
  */
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: resolution (and ICC, if written separately) are
         stripped so they are not emitted twice. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile resource: id 0x040F, empty name, padded to even. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Layer section length is back-patched after the layers are
         written (8 or 12 extra bytes for the nested length fields). */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* Zip is only valid inside layers; the composite falls back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
reconstruction.c | /*---------------------------------------------------------------------------------
RECONSTRUCTION.C
-Linear, WENO and MP5 reconstruction algorithms
---------------------------------------------------------------------------------*/
#include "decs.h"
#if RECONSTRUCTION == LINEAR
#define RECON_ALGO linear_mc
#elif RECONSTRUCTION == WENO
#define RECON_ALGO weno
#elif RECONSTRUCTION == MP5
#define RECON_ALGO mp5
#else
#error "Reconstruction not specified!"
#endif
// Sanity checks
#if (RECONSTRUCTION == WENO || RECONSTRUCTION == MP5) && NG < 3
#error "not enough ghost zones! PPM/WENO/MP5 + NG < 3\n"
#endif
void linear_mc(double unused1, double x1, double x2, double x3, double unused2, double *lout, double *rout);
void weno(double x1, double x2, double x3, double x4, double x5, double *lout, double *rout);
double median(double a, double b, double c);
double mp5_subcalc(double Fjm2, double Fjm1, double Fj, double Fjp1, double Fjp2);
void mp5(double x1, double x2, double x3, double x4, double x5, double *lout, double *rout);
// Linear reconstruction with MC slope limiter
inline void linear_mc(double unused1, double x1, double x2, double x3, double unused2, double *lout, double *rout)
{
double Dqm,Dqp,Dqc,s;
Dqm = 2. * (x2 - x1);
Dqp = 2. * (x3 - x2);
Dqc = 0.5 * (x3 - x1);
s = Dqm * Dqp;
if (s <= 0.)
s = 0.;
else {
if (fabs(Dqm) < fabs(Dqp) && fabs(Dqm) < fabs(Dqc))
s = Dqm;
else if (fabs(Dqp) < fabs(Dqc))
s = Dqp;
else
s = Dqc;
}
// Reconstruct left, right
*lout = x2 - 0.5*s;
*rout = x2 + 0.5*s;
}
// WENO interpolation. See Tchekhovskoy et al. 2007 (T07), Shu 2011 (S11)
// Implemented by Monika Moscibrodzka
inline void weno(double x1, double x2, double x3, double x4, double x5, double *lout, double *rout)
{
// S11 1, 2, 3
double vr[3], vl[3];
vr[0] = (3./8.)*x1 - (5./4.)*x2 + (15./8.)*x3;
vr[1] = (-1./8.)*x2 + (3./4.)*x3 + (3./8.)*x4;
vr[2] = (3./8.)*x3 + (3./4.)*x4 - (1./8.)*x5;
vl[0] = (3./8.)*x5 - (5./4.)*x4 + (15./8.)*x3;
vl[1] = (-1./8.)*x4 + (3./4.)*x3 + (3./8.)*x2;
vl[2] = (3./8.)*x3 + (3./4.)*x2 - (1./8.)*x1;
// Smoothness indicators, T07 A18 or S11 8
double beta[3];
beta[0] = (13./12.)*pow(x1 - 2.*x2 + x3, 2) +
(1./4.)*pow(x1 - 4.*x2 + 3.*x3, 2);
beta[1] = (13./12.)*pow(x2 - 2.*x3 + x4, 2) +
(1./4.)*pow(x4 - x2, 2);
beta[2] = (13./12.)*pow(x3 - 2.*x4 + x5, 2) +
(1./4.)*pow(x5 - 4.*x4 + 3.*x3, 2);
// Nonlinear weights S11 9
double den, wtr[3], Wr, wr[3], wtl[3], Wl, wl[3], eps;
eps=1.e-26;
den = eps + beta[0]; den *= den; wtr[0] = (1./16.)/den;
den = eps + beta[1]; den *= den; wtr[1] = (5./8. )/den;
den = eps + beta[2]; den *= den; wtr[2] = (5./16.)/den;
Wr = wtr[0] + wtr[1] + wtr[2];
wr[0] = wtr[0]/Wr ;
wr[1] = wtr[1]/Wr ;
wr[2] = wtr[2]/Wr ;
den = eps + beta[2]; den *= den; wtl[0] = (1./16.)/den;
den = eps + beta[1]; den *= den; wtl[1] = (5./8. )/den;
den = eps + beta[0]; den *= den; wtl[2] = (5./16.)/den;
Wl = wtl[0] + wtl[1] + wtl[2];
wl[0] = wtl[0]/Wl;
wl[1] = wtl[1]/Wl;
wl[2] = wtl[2]/Wl;
*lout = vl[0]*wl[0] + vl[1]*wl[1] + vl[2]*wl[2];
*rout = vr[0]*wr[0] + vr[1]*wr[1] + vr[2]*wr[2];
}
// MP5 reconstruction from PLUTO
// Imported by Mani Chandra
#define MINMOD(a, b) ((a)*(b) > 0.0 ? (fabs(a) < fabs(b) ? (a):(b)):0.0)
inline double median(double a, double b, double c)
{
return (a + MINMOD(b - a, c - a));
}
#define ALPHA (4.0)
#define EPSM  (1.e-12)

// Function form of the minmod limiter: the smaller-magnitude argument
// when both share a sign, zero otherwise.
static double mp5_minmod(double a, double b)
{
  return (a*b > 0.0) ? ((fabs(a) < fabs(b)) ? a : b) : 0.0;
}

// One-sided MP5 face value (Suresh & Huynh 1997).  Fjm2..Fjp2 are the
// five zone-centered values ordered toward the face being reconstructed;
// returns the monotonicity-preserving fifth-order interface value.
inline double mp5_subcalc(double Fjm2, double Fjm1, double Fj, double Fjp1, double Fjp2)
{
  double interp, d2m, d2c, d2p;
  double dMMm, dMMp;
  double tmp1, tmp2, lo, hi;
  double fAV, fMD, fLC, fUL, fMP;

  // Fifth-order upwind interpolant, then the monotone bound fMP.
  interp = 2.0*Fjm2 - 13.0*Fjm1 + 47.0*Fj + 27.0*Fjp1 - 3.0*Fjp2;
  interp /= 60.0;
  fMP = Fj + mp5_minmod(Fjp1 - Fj, ALPHA*(Fj - Fjm1));
  if ((interp - Fj)*(interp - fMP) <= EPSM)
    return interp;  // already monotone: accept unlimited value

  // Curvature measures, Eqn. 2.19
  d2m = Fjm2 + Fj   - 2.0*Fjm1;
  d2c = Fjm1 + Fjp1 - 2.0*Fj;
  d2p = Fj   + Fjp2 - 2.0*Fjp1;

  // Limited curvatures, Eqn. 2.27
  tmp1 = mp5_minmod(4.0*d2c - d2p, 4.0*d2p - d2c);
  tmp2 = mp5_minmod(d2c, d2p);
  dMMp = mp5_minmod(tmp1, tmp2);
  tmp1 = mp5_minmod(4.0*d2m - d2c, 4.0*d2c - d2m);
  tmp2 = mp5_minmod(d2c, d2m);
  dMMm = mp5_minmod(tmp1, tmp2);

  fUL = Fj + ALPHA*(Fj - Fjm1);             // Eqn. 2.8
  fAV = 0.5*(Fj + Fjp1);                    // Eqn. 2.16
  fMD = fAV - 0.5*dMMp;                     // Eqn. 2.28
  fLC = 0.5*(3.0*Fj - Fjm1) + 4.0/3.0*dMMm; // Eqn. 2.29

  // Admissible interval [lo, hi], Eqns. 2.24a/2.24b
  tmp1 = fmin(fmin(Fj, Fjp1), fMD);
  tmp2 = fmin(fmin(Fj, fUL), fLC);
  lo = fmax(tmp1, tmp2);
  tmp1 = fmax(fmax(Fj, Fjp1), fMD);
  tmp2 = fmax(fmax(Fj, fUL), fLC);
  hi = fmin(tmp1, tmp2);

  // Clamp into [lo, hi]; identical to median(interp, lo, hi), Eqn. 2.26
  return interp + mp5_minmod(lo - interp, hi - interp);
}
// MP5 reconstruction for one zone: the right face uses the stencil in
// forward order, the left face uses it mirrored.
inline void mp5(double x1, double x2, double x3, double x4, double x5, double *lout,
                double *rout)
{
  *lout = mp5_subcalc(x5, x4, x3, x2, x1);
  *rout = mp5_subcalc(x1, x2, x3, x4, x5);
}
#undef MINMOD
// Use the pre-processor for poor man's multiple dispatch
//
// reconstruct(): fill Pl/Pr with the left/right face values of every
// primitive in every zone, sweeping along direction `dir` (1 = X1,
// 2 = X2).  RECON_ALGO is bound at compile time to linear_mc, weno, or
// mp5, all of which share the five-point signature.
//
// NOTE(review): only dir == 1 and dir == 2 are handled; any other value
// silently does nothing -- presumably the X3 sweep is not needed in this
// build.  TODO confirm against callers.
// NOTE(review): collapse(2) collapses the outer two of the three loop
// macros (PLOOP/JSLOOP/ISLOOP); verify this matches the intended
// parallel decomposition.
void reconstruct(struct FluidState *S, GridPrim Pl, GridPrim Pr, int dir)
{
  timer_start(TIMER_RECON);
  // The pragma binds to the loop nest produced by the macros below.
  if (dir == 1)
#pragma omp parallel for collapse(2)
    PLOOP
      JSLOOP(-1, N2)
        ISLOOP(-1, N1)
          RECON_ALGO(S->P[ip][j][i-2], S->P[ip][j][i-1], S->P[ip][j][i], S->P[ip][j][i+1], S->P[ip][j][i+2], &(Pl[ip][j][i]), &(Pr[ip][j][i]));
  else if (dir == 2)
#pragma omp parallel for collapse(2)
    PLOOP
      JSLOOP(-1, N2)
        ISLOOP(-1, N1)
          RECON_ALGO(S->P[ip][j-2][i], S->P[ip][j-1][i], S->P[ip][j][i], S->P[ip][j+1][i], S->P[ip][j+2][i], &(Pl[ip][j][i]), &(Pr[ip][j][i]));
  timer_stop(TIMER_RECON);
}
|
GB_binop__minus_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc32)
// A*D function (colscale): GB (_AxD__minus_fc32)
// D*A function (rowscale): GB (_DxB__minus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc32)
// C=scalar+B GB (_bind1st__minus_fc32)
// C=scalar+B' GB (_bind1st_tran__minus_fc32)
// C=A+scalar GB (_bind2nd__minus_fc32)
// C=A'+scalar GB (_bind2nd_tran__minus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_minus (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (the dangling "\" line-continuation after the 0 has been removed: it
// spliced the following line into the macro body, which only worked
// because line splicing happens before comment stripping)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_minus (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FC32 || GxB_NO_MINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the accumulate/ewise loop
// itself lives in the shared template.
void GB (_Cdense_ewise3_accum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation); the loop
// lives in the shared template.
void GB (_Cdense_ewise3_noaccum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out (GB_DISABLE), so the caller falls back to the generic kernel.
//
// Fix: the original had a second, unreachable "return (GrB_SUCCESS);"
// inside the inner scope; a single return after the template matches the
// sibling _Cdense_accumB wrapper.  (The generator should be fixed too.)
GrB_Info GB (_Cdense_accumb__minus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes results directly into C's value array
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes results directly into C's value array
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion
// the alpha/beta scalars replace missing entries of A and B respectively.
GrB_Info GB (_AaddB__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.
GrB_Info GB (_AemultB_08__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_FLIPPED selects fmult(y,x) vs fmult(x,y) for
// non-commutative operators that lack a flipped variant.
GrB_Info GB (_AemultB_02__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__minus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply the operator with the scalar bound to the first
// argument.  Bb is B's bitmap (may be NULL for a full matrix); entries
// absent from the bitmap are skipped.
GrB_Info GB (_bind1st__minus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_minus (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply the operator with the scalar bound to the second
// argument.  Ab is A's bitmap (may be NULL for a full matrix); entries
// absent from the bitmap are skipped.
GrB_Info GB (_bind2nd__minus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_minus (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_minus (x, aij) ; \
}
// C = op(x, A'): transpose A and apply the operator with the scalar as
// the first argument (the transpose loop comes from GB_unop_transpose.c,
// which applies GB_CAST_OP per entry).
GrB_Info GB (_bind1st_tran__minus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_minus (aij, y) ; \
}
// C = op(A', y): transpose A and apply the operator with the scalar as
// the second argument (the transpose loop comes from GB_unop_transpose.c,
// which applies GB_CAST_OP per entry).
GrB_Info GB (_bind2nd_tran__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% gamma to try to set it appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image)
% MagickBooleanType AutoGammaImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set, all given channels are adjusted in the same way using the
% mean average of those channels.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: auto-gamma the default channel set.
  */
  status=AutoGammaImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
const ChannelType channel)
{
MagickStatusType
status;
double
mean,sans,gamma,logmean;
logmean=log(0.5);
if ((channel & SyncChannels) != 0 )
{
/*
Apply gamma correction equally accross all given channels
*/
(void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
gamma=log(mean*QuantumScale)/logmean;
return LevelImageChannel(image, channel,
0.0, (double)QuantumRange, gamma);
}
/*
auto-gamma each channel separateally
*/
status = MagickTrue;
if ((channel & RedChannel) != 0)
{
(void) GetImageChannelMean(image,RedChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, RedChannel,
0.0, (double)QuantumRange, gamma);
}
if ((channel & GreenChannel) != 0)
{
(void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, GreenChannel,
0.0, (double)QuantumRange, gamma);
}
if ((channel & BlueChannel) != 0)
{
(void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, BlueChannel,
0.0, (double)QuantumRange, gamma);
}
if (((channel & OpacityChannel) != 0) &&
(image->matte == MagickTrue))
{
(void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, OpacityChannel,
0.0, (double)QuantumRange, gamma);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
&image->exception);
gamma=log(mean*QuantumScale)/logmean;
status = status && LevelImageChannel(image, IndexChannel,
0.0, (double)QuantumRange, gamma);
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image)
% MagickBooleanType AutoLevelImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o channel: The channels to auto-level. If the special 'SyncChannels'
% flag is set the min/max/mean value of all given channels is used for
% all given channels, to all channels in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: auto-level the default channel set.
  */
  status=AutoLevelImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  MagickBooleanType
    status;

  /*
    Auto-level is just a min/max histogram stretch with no clipping.
  */
  status=MinMaxStretchImage(image,channel,0.0,0.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use BrightnessContrastImage() to change the brightness and/or contrast of
% an image. It converts the brightness and contrast parameters into slope
% and intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast)
% MagickBooleanType BrightnessContrastImageChannel(Image *image,
% const ChannelType channel,const double brightness,
% const double contrast)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  /*
    Convenience wrapper: adjust brightness/contrast of the default channels.
  */
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}
MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  /*
    Convert the brightness/contrast percentages (-100..100) into the slope
    and intercept of a linear transfer function, then apply it to the image
    as a first-degree polynomial.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* contrast of -100 gives slope 0, 0 gives slope 1, +100 tends to steep */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  return(FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MaxTextExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelPacket
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Parse the Color Correction Collection (CCC) XML: one slope/offset/power
    (SOP) triple per RGB channel plus a global saturation.  Elements that
    are absent keep identity defaults (slope/power 1, offset 0).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          /*
            Element content is up to three (optionally comma separated)
            values, in R, G, B order.
          */
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0: color_correction.red.slope=StringToDouble(token); break;
              case 1: color_correction.green.slope=StringToDouble(token); break;
              case 2: color_correction.blue.slope=StringToDouble(token); break;
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0: color_correction.red.offset=StringToDouble(token); break;
              case 1: color_correction.green.offset=StringToDouble(token); break;
              case 2: color_correction.blue.offset=StringToDouble(token); break;
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0: color_correction.red.power=StringToDouble(token); break;
              case 1: color_correction.green.power=StringToDouble(token); break;
              case 2: color_correction.blue.power=StringToDouble(token); break;
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetMagickToken(p,&p,token);
          color_correction.saturation=StringToDouble(token);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Expand the SOP transfer function into a per-quantum lookup table:
    out = (slope*in + offset) ^ power, evaluated over the map domain.
  */
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.  Saturation blends each channel
        between its Rec. 709 luma and the mapped value; the parentheses
        around (cdl_map[...]-luma) are required so it matches the
        DirectClass path below (previously the luma terms canceled out).
        The loop touches neither progress nor status, so no shared clause.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        luma=0.2126*image->colormap[i].red+0.7152*image->colormap[i].green+
          0.0722*image->colormap[i].blue;
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma));
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma));
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma));
      }
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.2126*q->red+0.7152*q->green+0.0722*q->blue;
      q->red=ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(q->red)].red-luma));
      q->green=ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(q->green)].green-luma));
      q->blue=ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(q->blue)].blue-luma));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the CLUT to the default channel set.
  */
  status=ClutImageChannel(image,DefaultChannels,clut_image);
  return(status);
}
MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;    /* (MaxMap+1)-entry table sampled from the CLUT image */

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickSignature);
  /* pixel values are rewritten per pixel, so the image must be DirectClass */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    With integer interpolation sample the full extent; otherwise stop one
    short so the last table entry interpolates at the CLUT's final pixel.
  */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireCacheView(clut_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    /*
      Sample along the diagonal of the CLUT image, so either a horizontal
      or a vertical gradient works.  NOTE(review): the interpolation method
      passed is UndefinedInterpolatePixel even though adjust was derived
      from clut_image->interpolate -- confirm this is intentional.
    */
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust),
      QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* snapshot the original pixel; needed for the opacity cases below */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetRedPixelComponent(q,ClampRedPixelComponent(clut_map+
          ScaleQuantumToMap(q->red)));
      if ((channel & GreenChannel) != 0)
        SetGreenPixelComponent(q,ClampGreenPixelComponent(clut_map+
          ScaleQuantumToMap(q->green)));
      if ((channel & BlueChannel) != 0)
        SetBluePixelComponent(q,ClampBluePixelComponent(clut_map+
          ScaleQuantumToMap(q->blue)));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Three cases: CLUT has no matte -> use its intensity as an alpha
            transfer; image has no matte -> index CLUT's opacity by the
            pixel's intensity; both have matte -> direct opacity lookup.
          */
          if (clut_image->matte == MagickFalse)
            q->opacity=(Quantum) (QuantumRange-MagickPixelIntensityToQuantum(
              clut_map+ScaleQuantumToMap((Quantum) GetAlphaPixelComponent(q))));
          else
            if (image->matte == MagickFalse)
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(
                clut_map+ScaleQuantumToMap(q->opacity)));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=ClampToQuantum((clut_map+(ssize_t) indexes[x])->index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* if the CLUT wrote alpha values, make sure the image's matte is enabled */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Push the HSB brightness sinusoidally away from the mid-point when
    sign > 0 (dark colors become darker, light colors lighter) or back
    toward it when sign < 0, then convert back to RGB in place.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  /* clamp brightness into the normalized [0,1] range */
  brightness=brightness > 1.0 ? 1.0 : brightness;
  brightness=brightness < 0.0 ? 0.0 : brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* sharpen selects direction: +1 increases contrast, -1 reduces it */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* a failed row elsewhere aborts the remaining (parallel) rows */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* adjust each pixel in place via the HSB brightness push */
      Contrast(sign,&q->red,&q->green,&q->blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The ContrastStretchImage() is a simple image enhancement technique that
% attempts to improve the contrast in an image by `stretching' the range of
% intensity values it contains to span a desired range of values. It differs
% from the more sophisticated histogram equalization in that it can only
% apply a linear scaling function to the image pixel values. As a result
% the `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const size_t channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse the levels string (e.g. "1%", "10x90%") into black/white points
    expressed as pixel counts, then delegate to the channel method.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percentages are fractions of the total pixel count: the black and
        white points are compared against cumulative histogram (pixel)
        counts in ContrastStretchImageChannel(), not quantum values, so
        scale by columns*rows/100 rather than QuantumRange/100.
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
CacheView
*image_view;
double
intensity;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
black,
*histogram,
*stretch_map,
white;
register ssize_t
i;
ssize_t
y;
/*
Allocate histogram and stretch map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
stretch_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*stretch_map));
if ((histogram == (MagickPixelPacket *) NULL) ||
(stretch_map == (MagickPixelPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
status=MagickTrue;
exception=(&image->exception);
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (channel == DefaultChannels)
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
intensity;
intensity=PixelIntensityToQuantum(p);
histogram[ScaleQuantumToMap(intensity)].red++;
histogram[ScaleQuantumToMap(intensity)].green++;
histogram[ScaleQuantumToMap(intensity)].blue++;
histogram[ScaleQuantumToMap(intensity)].index++;
p++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap(GetRedPixelComponent(p))].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap(GetGreenPixelComponent(p))].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap(GetBluePixelComponent(p))].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap(GetOpacityPixelComponent(p))].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(indexes[x])].index++;
p++;
}
}
/*
Find the histogram boundaries by locating the black/white levels.
*/
black.red=0.0;
white.red=MaxRange(QuantumRange);
if ((channel & RedChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].red;
if (intensity > black_point)
break;
}
black.red=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].red;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.red=(MagickRealType) i;
}
black.green=0.0;
white.green=MaxRange(QuantumRange);
if ((channel & GreenChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].green;
if (intensity > black_point)
break;
}
black.green=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].green;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.green=(MagickRealType) i;
}
black.blue=0.0;
white.blue=MaxRange(QuantumRange);
if ((channel & BlueChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].blue;
if (intensity > black_point)
break;
}
black.blue=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].blue;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.blue=(MagickRealType) i;
}
black.opacity=0.0;
white.opacity=MaxRange(QuantumRange);
if ((channel & OpacityChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].opacity;
if (intensity > black_point)
break;
}
black.opacity=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].opacity;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.opacity=(MagickRealType) i;
}
black.index=0.0;
white.index=MaxRange(QuantumRange);
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].index;
if (intensity > black_point)
break;
}
black.index=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].index;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.index=(MagickRealType) i;
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
/*
Stretch the histogram to create the stretched image mapping.
*/
(void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & RedChannel) != 0)
{
if (i < (ssize_t) black.red)
stretch_map[i].red=0.0;
else
if (i > (ssize_t) white.red)
stretch_map[i].red=(MagickRealType) QuantumRange;
else
if (black.red != white.red)
stretch_map[i].red=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.red)/(white.red-black.red)));
}
if ((channel & GreenChannel) != 0)
{
if (i < (ssize_t) black.green)
stretch_map[i].green=0.0;
else
if (i > (ssize_t) white.green)
stretch_map[i].green=(MagickRealType) QuantumRange;
else
if (black.green != white.green)
stretch_map[i].green=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.green)/(white.green-
black.green)));
}
if ((channel & BlueChannel) != 0)
{
if (i < (ssize_t) black.blue)
stretch_map[i].blue=0.0;
else
if (i > (ssize_t) white.blue)
stretch_map[i].blue=(MagickRealType) QuantumRange;
else
if (black.blue != white.blue)
stretch_map[i].blue=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.blue)/(white.blue-
black.blue)));
}
if ((channel & OpacityChannel) != 0)
{
if (i < (ssize_t) black.opacity)
stretch_map[i].opacity=0.0;
else
if (i > (ssize_t) white.opacity)
stretch_map[i].opacity=(MagickRealType) QuantumRange;
else
if (black.opacity != white.opacity)
stretch_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.opacity)/(white.opacity-
black.opacity)));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (i < (ssize_t) black.index)
stretch_map[i].index=0.0;
else
if (i > (ssize_t) white.index)
stretch_map[i].index=(MagickRealType) QuantumRange;
else
if (black.index != white.index)
stretch_map[i].index=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.index)/(white.index-
black.index)));
}
}
/*
Stretch the image.
*/
if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)))
image->storage_class=DirectClass;
if (image->storage_class == PseudoClass)
{
/*
Stretch colormap.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
image->colormap[i].red=ClampToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].red)].red);
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
image->colormap[i].green=ClampToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].green)].green);
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
image->colormap[i].blue=ClampToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue);
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
image->colormap[i].opacity=ClampToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
}
}
}
/*
Stretch image.
*/
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
q->red=ClampToQuantum(stretch_map[ScaleQuantumToMap(q->red)].red);
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
q->green=ClampToQuantum(stretch_map[ScaleQuantumToMap(
q->green)].green);
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
q->blue=ClampToQuantum(stretch_map[ScaleQuantumToMap(
q->blue)].blue);
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
q->opacity=ClampToQuantum(stretch_map[ScaleQuantumToMap(
q->opacity)].opacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (black.index != white.index)
indexes[x]=(IndexPacket) ClampToQuantum(stretch_map[
ScaleQuantumToMap(indexes[x])].index);
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ContrastStretchImageChannel)
#endif
proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Enhance(weight): fold the neighborhood pixel at r into the running weighted
  average (aggregate/total_weight), but only when its color distance from the
  center pixel is small enough -- the squared-distance test below rejects
  neighbors whose weighted distance exceeds (QuantumRange/5)^2.  The red and
  blue contributions are scaled by the mean intensity of the pixel pair;
  green uses a fixed 4.0 factor.  r is always advanced to the next neighbor,
  even when the candidate pixel is rejected.
*/
#define Enhance(weight) \
  mean=((MagickRealType) r->red+pixel.red)/2; \
  distance=(MagickRealType) r->red-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) r->green+pixel.green)/2; \
  distance=(MagickRealType) r->green-(MagickRealType) pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) r->blue+pixel.blue)/2; \
  distance=(MagickRealType) r->blue-(MagickRealType) pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*r->red; \
      aggregate.green+=(weight)*r->green; \
      aggregate.blue+=(weight)*r->blue; \
      aggregate.opacity+=(weight)*r->opacity; \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The filter needs a full 5x5 neighborhood; refuse images too small for it.
  */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  enhance_view=AcquireCacheView(enhance_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Read another scan line.
    */
    if (status == MagickFalse)
      continue;
    /*
      Fetch 5 source rows centered on y, padded 2 pixels on each side so the
      5x5 window is valid at the image borders (virtual pixels fill the edge).
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *restrict r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /*
        pixel is the center of the 5x5 window (row 2, column 2 of the padded
        scan lines fetched above).
      */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      /*
        Apply the 5x5 weight kernel row by row; the center pixel (weight 80)
        always passes the distance test, so total_weight is never zero.
      */
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      /*
        Normalize the accumulated sums; the +(total_weight/2)-1 term rounds
        the integer quotient instead of truncating.
      */
      q->red=(Quantum) ((aggregate.red+(total_weight/2)-1)/total_weight);
      q->green=(Quantum) ((aggregate.green+(total_weight/2)-1)/total_weight);
      q->blue=(Quantum) ((aggregate.blue+(total_weight/2)-1)/total_weight);
      q->opacity=(Quantum) ((aggregate.opacity+(total_weight/2)-1)/
        total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: histogram-equalize the default channels by
    delegating to the channel-aware implementation.
  */
  status=EqualizeImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *equalize_map,
    *histogram,
    intensity,
    *map,
    white;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (MagickPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /*
        Release whichever allocations succeeded before throwing.
      */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (MagickPixelPacket *) NULL)
        equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.  Each selected channel gets its own per-bin count,
    indexed by the quantum value scaled into [0, MaxMap].
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        histogram[ScaleQuantumToMap(GetRedPixelComponent(p))].red++;
      if ((channel & GreenChannel) != 0)
        histogram[ScaleQuantumToMap(GetGreenPixelComponent(p))].green++;
      if ((channel & BlueChannel) != 0)
        histogram[ScaleQuantumToMap(GetBluePixelComponent(p))].blue++;
      if ((channel & OpacityChannel) != 0)
        histogram[ScaleQuantumToMap(GetOpacityPixelComponent(p))].opacity++;
      /*
        The index (black) channel only exists for CMYK images.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        histogram[ScaleQuantumToMap(indexes[x])].index++;
      p++;
    }
  }
  /*
    Integrate the histogram to get the equalization map.  map[i] holds the
    cumulative pixel count up to and including bin i, per channel.
  */
  (void) ResetMagickMemory(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  /*
    black/white are the cumulative counts at the histogram extremes; the
    equalize map linearly rescales the CDF between these two values.
  */
  black=map[0];
  white=map[(int) MaxMap];
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  /*
    NOTE(review): progress and status are declared shared here but are
    neither read nor written in this loop (they are initialized further
    down); the clause is harmless but confusing.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    /*
      A channel with white == black has no dynamic range; its map entry is
      left at zero and the channel is skipped in the apply loops below.
    */
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
        (MagickRealType) ((MaxMap*(map[i].opacity-black.opacity))/
        (white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=ClampToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red);
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=ClampToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green);
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=ClampToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue);
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=ClampToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
      }
    }
  /*
    Equalize image.  Pixels are remapped through equalize_map in parallel,
    one scan line per task.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        q->red=ClampToQuantum(equalize_map[ScaleQuantumToMap(q->red)].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        q->green=ClampToQuantum(equalize_map[ScaleQuantumToMap(
          q->green)].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        q->blue=ClampToQuantum(equalize_map[ScaleQuantumToMap(q->blue)].blue);
      if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
        q->opacity=ClampToQuantum(equalize_map[ScaleQuantumToMap(
          q->opacity)].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        indexes[x]=ClampToQuantum(equalize_map[ScaleQuantumToMap(
          indexes[x])].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  /*
    Parse the gamma specification (e.g. "1.6,1.2,1.0") and apply the
    correction to the red, green, and blue channels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  /*
    A missing green or blue value defaults to the red gamma.
  */
  gamma.red=geometry_info.rho;
  gamma.green=((flags & SigmaValue) != 0) ? geometry_info.sigma : gamma.red;
  gamma.blue=((flags & XiValue) != 0) ? geometry_info.xi : gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);  /* identity correction: nothing to do */
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(const ChannelType) (RedChannel |
      GreenChannel | BlueChannel),(double) gamma.red);
  else
    {
      /*
        Unequal gammas: correct each channel independently.
      */
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status|=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table gamma_map[i] = (i/MaxMap)^(1/gamma), rescaled to
    quantum range.  When gamma is 0.0 the table is left all zeros, which
    forces the selected channel to black (the documented way to remove a
    channel's influence).  The pragma binds to the for loop, which the if
    still controls.
  */
  if (gamma != 0.0)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4)
#endif
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[
            ScaleQuantumToMap(image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[
            ScaleQuantumToMap(image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[
            ScaleQuantumToMap(image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /*
              With matte enabled, opacity stores inverted alpha; apply gamma
              to the alpha value (QuantumRange-opacity) and re-invert.
            */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[
                ScaleQuantumToMap(image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=(Quantum) QuantumRange-
                gamma_map[ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Fast path: all default channels selected, no per-channel tests.
      */
      if (channel == DefaultChannels)
        {
          q->red=gamma_map[ScaleQuantumToMap(q->red)];
          q->green=gamma_map[ScaleQuantumToMap(q->green)];
          q->blue=gamma_map[ScaleQuantumToMap(q->blue)];
        }
      else
        {
          if ((channel & RedChannel) != 0)
            q->red=gamma_map[ScaleQuantumToMap(q->red)];
          if ((channel & GreenChannel) != 0)
            q->green=gamma_map[ScaleQuantumToMap(q->green)];
          if ((channel & BlueChannel) != 0)
            q->blue=gamma_map[ScaleQuantumToMap(q->blue)];
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                q->opacity=gamma_map[ScaleQuantumToMap(q->opacity)];
              else
                q->opacity=(Quantum) QuantumRange-gamma_map[
                  ScaleQuantumToMap((Quantum) GetAlphaPixelComponent(q))];
            }
        }
      q++;
    }
    /*
      CMYK images keep the black channel in the index queue; correct it in a
      second pass over the scan line.
    */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        indexes[x]=gamma_map[ScaleQuantumToMap(indexes[x])];
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma applied to the image.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
% MagickBooleanType HaldClutImageChannel(Image *image,
% const ChannelType channel,Image *hald_image)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o channel: the channel.
%
*/
/*
  Return the smaller of two size_t values.
*/
static inline size_t MagickMin(const size_t x,const size_t y)
{
  return(x < y ? x : y);
}
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: apply the Hald CLUT to the default channels via the
    channel-aware implementation.
  */
  status=HaldClutImageChannel(image,DefaultChannels,hald_image);
  return(status);
}
MagickExport MagickBooleanType HaldClutImageChannel(Image *image,
  const ChannelType channel,const Image *hald_image)
{
#define HaldClutImageTag "Clut/Image"

  typedef struct _HaldInfo
  {
    MagickRealType
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Hald clut image.  Derive the CLUT level from the Hald image geometry:
    a level-n Hald image encodes an n^2 x n^2 x n^2 color cube.
  */
  status=MagickTrue;
  progress=0;
  length=MagickMin(hald_image->columns,hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  hald_view=AcquireCacheView(hald_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Bug fix: the index queue must come from image_view -- the view whose
      authentic pixels were just acquired and whose indexes[x] entries are
      written below for CMYK images.  The previous code queried hald_view,
      the read-only lookup-table view, yielding the wrong (or a stale)
      index queue.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Locate the pixel's color in the Hald cube and trilinearly
        interpolate between the 8 surrounding CLUT entries: two bilinear
        lookups per z-slice, then a blend across z.
      */
      point.x=QuantumScale*(level-1.0)*q->red;
      point.y=QuantumScale*(level-1.0)*q->green;
      point.z=QuantumScale*(level-1.0)*q->blue;
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      (void) InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/
        width),&pixel2,exception);
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,point.y,&pixel4);
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,point.z,&pixel);
      /*
        Store the interpolated color into the selected channels only.
      */
      if ((channel & RedChannel) != 0)
        SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
      if ((channel & GreenChannel) != 0)
        SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
      if ((channel & BlueChannel) != 0)
        SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=ClampToQuantum(pixel.index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_HaldClutImageChannel)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
%  LevelImageChannel() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse a level specification of the form "black[,white][,gamma][%][!]"
    and dispatch to the matching level operator.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=((flags & SigmaValue) != 0) ? geometry_info.sigma :
    (double) QuantumRange;
  gamma=((flags & XiValue) != 0) ? geometry_info.xi : 1.0;
  if ((flags & PercentValue) != 0)
    {
      /*
        Percent values are relative to the total pixel count.
      */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  if ((flags & AspectValue) != 0)
    status=LevelizeImage(image,black_point,white_point,gamma);
  else
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
%  values. Gamma correction is also applied after the values have been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
%  normalized; or, by swapping black and white values, the image is negated.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma)
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const double black_point,
% const double white_point,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
% use 1.0 for purely linear stretching of image color values
%
*/
/*
  LevelImageChannel() stretches the selected channels so that black_point maps
  to 0 and white_point maps to QuantumRange, applying a gamma adjustment to
  the normalized values before scaling back to quantum range.
*/
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag "Level/Image"
/* Map one quantum: normalize against black_point, gamma-correct, re-scale. */
#define LevelQuantum(x) (ClampToQuantum((MagickRealType) QuantumRange* \
  pow(scale*((double) (x)-black_point),1.0/gamma)))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register double
    scale;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Guard against a zero divide when the black and white points coincide. */
  scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0;
  /* For palette images, level the colormap entries directly. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    /*
      Level colormap.
    */
    if ((channel & RedChannel) != 0)
      image->colormap[i].red=LevelQuantum(image->colormap[i].red);
    if ((channel & GreenChannel) != 0)
      image->colormap[i].green=LevelQuantum(image->colormap[i].green);
    if ((channel & BlueChannel) != 0)
      image->colormap[i].blue=LevelQuantum(image->colormap[i].blue);
    if ((channel & OpacityChannel) != 0)
      image->colormap[i].opacity=LevelQuantum(image->colormap[i].opacity);
  }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the rest of the work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=LevelQuantum(q->red);
      if ((channel & GreenChannel) != 0)
        q->green=LevelQuantum(q->green);
      if ((channel & BlueChannel) != 0)
        q->blue=LevelQuantum(q->blue);
      /*
        Opacity is stored inverted relative to alpha, so level the
        complement and invert the result back.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        q->opacity=(Quantum) (QuantumRange-LevelQuantum(QuantumRange-
          q->opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=LevelQuantum(indexes[x]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used for example de-contrast a greyscale image to the exact
% levels specified. Or by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
/*
  LevelizeImage() is a convenience wrapper that applies the reverse level
  operation to all default channels of the image.
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  return(LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma));
}
/*
  LevelizeImageChannel() performs the reverse of LevelImageChannel():
  gamma-corrects the normalized quantum, then compresses it into the
  [black_point, white_point] range.
*/
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag "Levelize/Image"
/* Map one quantum: gamma-correct the normalized value, then compress it
   into the [black_point, white_point] range. */
#define LevelizeValue(x) (ClampToQuantum(((MagickRealType) \
  pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \
  black_point))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* For palette images, levelize the colormap entries directly. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    /*
      Level colormap.
    */
    if ((channel & RedChannel) != 0)
      image->colormap[i].red=LevelizeValue(image->colormap[i].red);
    if ((channel & GreenChannel) != 0)
      image->colormap[i].green=LevelizeValue(image->colormap[i].green);
    if ((channel & BlueChannel) != 0)
      image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
    if ((channel & OpacityChannel) != 0)
      image->colormap[i].opacity=LevelizeValue(image->colormap[i].opacity);
  }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the rest of the work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=LevelizeValue(q->red);
      if ((channel & GreenChannel) != 0)
        q->green=LevelizeValue(q->green);
      if ((channel & BlueChannel) != 0)
        q->blue=LevelizeValue(q->blue);
      /* NOTE(review): opacity is levelized directly here, while
         LevelImageChannel() levels its complement -- confirm intended. */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        q->opacity=LevelizeValue(q->opacity);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=LevelizeValue(indexes[x]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in
% the reverse direction. That is, any existing "black" and "white" colors in
% the image will become the color values given, with all other values
% compressed appropriately. This effectively maps a greyscale gradient into
% the given color gradient.
%
% The format of the LevelColorsImageChannel method is:
%
% MagickBooleanType LevelColorsImage(Image *image,
% const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
% MagickBooleanType LevelColorsImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *black_color,
% const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
/*
  LevelColorsImage() is a convenience wrapper that applies the color-level
  operation to all default channels of the image.
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  return(LevelColorsImageChannel(image,DefaultChannels,black_color,
    white_color,invert));
}
/*
  LevelColorsImageChannel() maps the given colors to black and white on a
  channel-by-channel basis.  When invert is false each channel is levelled
  with LevelImageChannel(); when invert is true the reverse operation
  (LevelizeImageChannel()) is applied instead.  Opacity is only touched on
  matte images and the index channel only in CMYK colorspace.
*/
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickBooleanType
    (*level)(Image *,const ChannelType,const double,const double,
      const double);

  MagickStatusType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Both operations share a signature; select one up front. */
  level=(invert == MagickFalse) ? LevelImageChannel : LevelizeImageChannel;
  status=MagickFalse;
  if ((channel & RedChannel) != 0)
    status|=level(image,RedChannel,black_color->red,white_color->red,
      (double) 1.0);
  if ((channel & GreenChannel) != 0)
    status|=level(image,GreenChannel,black_color->green,white_color->green,
      (double) 1.0);
  if ((channel & BlueChannel) != 0)
    status|=level(image,BlueChannel,black_color->blue,white_color->blue,
      (double) 1.0);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte == MagickTrue))
    status|=level(image,OpacityChannel,black_color->opacity,
      white_color->opacity,(double) 1.0);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    status|=level(image,IndexChannel,black_color->index,white_color->index,
      (double) 1.0);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The LinearStretchImage() discards any pixels below the black point and
% above the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
/*
  LinearStretchImage() builds an intensity histogram, locates the levels at
  which the cumulative pixel counts reach black_point and white_point, and
  levels the image between those two intensities.
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* Bucket each pixel by its grayscale intensity. */
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(PixelIntensityToQuantum(p))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point levels.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  /* NOTE(review): this loop stops at index 1 and never examines
     histogram[0] -- confirm that bucket 0 is intentionally excluded. */
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /* Level all channels linearly (gamma 1.0) between the located bounds. */
  status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white,
    1.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. And if the colorspace is
% HWB, use blackness, whiteness, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
/*
  Modulate one RGB triple in HSB space: rotate hue and scale saturation and
  brightness by the given percentages (100 == unchanged).
*/
static void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    h,
    s;

  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  /* 100% leaves hue in place; other values rotate it, wrapped to [0,1]. */
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h > 1.0)
    h-=1.0;
  b*=0.01*percent_brightness;
  s*=0.01*percent_saturation;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
/*
  Modulate one RGB triple in HSL space: rotate hue and scale saturation and
  lightness by the given percentages (100 == unchanged).
*/
static void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    h,
    l,
    s;

  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSL(*red,*green,*blue,&h,&s,&l);
  /* 100% leaves hue in place; other values rotate it, wrapped to [0,1]. */
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h > 1.0)
    h-=1.0;
  l*=0.01*percent_lightness;
  s*=0.01*percent_saturation;
  ConvertHSLToRGB(h,s,l,red,green,blue);
}
/*
  Modulate one RGB triple in HWB space: rotate hue and scale whiteness and
  blackness by the given percentages (100 == unchanged).
*/
static void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    h,
    w;

  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHWB(*red,*green,*blue,&h,&w,&b);
  /* 100% leaves hue in place; other values rotate it, wrapped to [0,1]. */
  h+=0.5*(0.01*percent_hue-1.0);
  while (h < 0.0)
    h+=1.0;
  while (h > 1.0)
    h-=1.0;
  b*=0.01*percent_blackness;
  w*=0.01*percent_whiteness;
  ConvertHWBToRGB(h,w,b,red,green,blue);
}
/*
  ModulateImage() parses "brightness[,saturation[,hue]]" percentages from the
  modulate string and applies them to every pixel (and colormap entry) in the
  colorspace chosen by the "modulate:colorspace" artifact (HSL by default).
*/
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(modulate,&geometry_info);
  /* Missing saturation/hue components default to 100% (no change). */
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseMagickOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    {
      /*
        Modulate colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
        switch (colorspace)
        {
          case HSBColorspace:
          {
            ModulateHSB(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
          case HSLColorspace:
          default:
          {
            ModulateHSL(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
          case HWBColorspace:
          {
            /* NOTE(review): ModulateHWB() declares (hue,whiteness,blackness)
               but receives (hue,saturation,brightness) -- confirm mapping. */
            ModulateHWB(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
        }
    }
  /*
    Modulate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the rest of the work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (colorspace)
      {
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
      }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
/*
  NegateImage() is a convenience wrapper that negates all default channels
  of the image.
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
/*
  NegateImageChannel() inverts the selected channels of every pixel
  (QuantumRange - value).  When grayscale is true, only pixels whose red,
  green and blue components are equal are negated.

  Fix: the grayscale branch previously ended with return(MagickTrue),
  discarding any failure recorded in status (pixel-cache errors, progress
  monitor aborts); it now returns status like the non-grayscale path.
*/
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /* In grayscale mode, skip colormap entries that are not gray. */
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=(Quantum) QuantumRange-
            image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  if (grayscale != MagickFalse)
    {
      /*
        Negate only pixels whose red, green and blue components are equal.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((q->red != q->green) || (q->green != q->blue))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            q->red=(Quantum) QuantumRange-q->red;
          if ((channel & GreenChannel) != 0)
            q->green=(Quantum) QuantumRange-q->green;
          if ((channel & BlueChannel) != 0)
            q->blue=(Quantum) QuantumRange-q->blue;
          if ((channel & OpacityChannel) != 0)
            q->opacity=(Quantum) QuantumRange-q->opacity;
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            indexes[x]=(IndexPacket) QuantumRange-indexes[x];
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /* Was return(MagickTrue): propagate any failure recorded above. */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=(Quantum) QuantumRange-q->red;
      if ((channel & GreenChannel) != 0)
        q->green=(Quantum) QuantumRange-q->green;
      if ((channel & BlueChannel) != 0)
        q->blue=(Quantum) QuantumRange-q->blue;
      if ((channel & OpacityChannel) != 0)
        q->opacity=(Quantum) QuantumRange-q->opacity;
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=(IndexPacket) QuantumRange-indexes[x];
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_NegateImageChannel)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest pixels toward black and the brightest pixels toward
% white (the exact percentile thresholds are set in NormalizeImageChannel()).
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  NormalizeImage() is a convenience wrapper that normalizes all default
  channels of the image.
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  return(NormalizeImageChannel(image,DefaultChannels));
}
/*
  NormalizeImageChannel() stretches the contrast of the selected channels by
  delegating to ContrastStretchImageChannel() with fixed pixel-count
  thresholds (0.15% of pixels at the dark end, 99.95% at the bright end).
*/
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    area,
    black_point,
    white_point;

  area=(double) image->columns*image->rows;
  black_point=area*0.0015;
  white_point=area*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o alpha: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o beta: midpoint of the function as a color value 0 to QuantumRange.
%
*/
/*
  SigmoidalContrastImage() parses "contrast[,midpoint[%]]" from the levels
  string (midpoint defaults to mid-quantum, a '%' suffix scales it from a
  percentage) and forwards to the channel method for all default channels.
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  return(SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma));
}
/*
  SigmoidalContrastImageChannel() precomputes a MaxMap+1 entry transfer
  table from a (normalized) sigmoidal function of the given contrast and
  midpoint -- or its inverse when sharpen is false -- then applies the table
  to the selected channels of every pixel (and colormap entry).
*/
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize sigmoidal maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if (sharpen != MagickFalse)
      {
        /*
          Forward sigmoid, rescaled so the endpoints of the map span the
          full quantum range.
        */
        sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
          (MaxMap*((1.0/(1.0+exp(contrast*(midpoint/(double) QuantumRange-
          (double) i/MaxMap))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))/((1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange-1.0))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))+0.5));
        continue;
      }
    /*
      Inverse sigmoid: undoes the mapping above so the contrast is reduced.
    */
    sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
      (MaxMap*(QuantumScale*midpoint-log((1.0-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))+((double) i/MaxMap)*((1.0/
      (1.0+exp(contrast*(midpoint/(double) QuantumRange-1.0))))-(1.0/
      (1.0+exp(midpoint/(double) QuantumRange*contrast))))))/
      (1.0/(1.0+exp(midpoint/(double) QuantumRange*contrast))+
      ((double) i/MaxMap)*((1.0/(1.0+exp(contrast*(midpoint/
      (double) QuantumRange-1.0))))-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))))))/contrast)));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Sigmoidal-contrast enhance colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].red)]);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].green)]);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].blue)]);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].opacity)]);
      }
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the rest of the work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(q->red)]);
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(q->green)]);
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(q->blue)]);
      if ((channel & OpacityChannel) != 0)
        q->opacity=ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(q->opacity)]);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=(IndexPacket) ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(indexes[x])]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
|
pi_spmd_simple_padded.c | /*
NAME: PI SPMD ... a simple version.
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The program was parallelized using OpenMP and an SPMD
algorithm. The following OpenMP specific lines were
added:
(1) A line to include omp.h -- the include file that
contains OpenMP's function prototypes and constants.
(2) A pragma that tells OpenMP to create a team of threads
with an integer variable i being created for each thread.
(3) two function calls: one to get the thread ID (ranging
from 0 to one less than the number of threads), and the other
returning the total number of threads.
(4) A cyclic distribution of the loop by changing loop control
expressions to run from the thread ID incremented by the number
 of threads. Local sums accumulated into sum[id].
 Note: an unpadded sum[] array would show low performance due to
 false sharing: sum[id] is unique to each thread, but adjacent
 values of the array would share a cache line, causing cache
 thrashing as the program runs. In this version each row of
 sum[][] is padded to PADDING doubles so every thread's
 accumulator sits on its own cache line.
History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>
#define MAX_THREADS 4
#define PADDING 8
static long num_steps = 100000000;
double step;
int main ()
{
	int i, j;
	double pi, full_sum = 0.0;
	double start_time, run_time;
	double sum[MAX_THREADS][PADDING];
	int nthreads;

	step = 1.0/(double) num_steps;

	/* Run the computation once for each requested thread count. */
	for (j = 1; j <= MAX_THREADS; j++) {
		omp_set_num_threads(j);
		full_sum = 0.0;

		/* Zero every per-thread slot up front so the reduction below
		   is correct even if the runtime grants fewer than j threads
		   (otherwise stale/uninitialized rows would be summed). */
		for (i = 0; i < MAX_THREADS; i++)
			sum[i][0] = 0.0;

		start_time = omp_get_wtime();
#pragma omp parallel
		{
			int i;
			int id = omp_get_thread_num();
			int numthreads = omp_get_num_threads();
			double x;

			sum[id][0] = 0.0;

			if (id == 0) {
				printf(" num_threads = %d", numthreads);
				nthreads = numthreads;
			}

			/* Cyclic (round-robin) distribution of iterations.
			   sum[id][0] sits on its own cache line thanks to the
			   PADDING dimension, avoiding false sharing. */
			for (i = id; i < num_steps; i += numthreads) {
				x = (i + 0.5) * step;
				sum[id][0] += 4.0 / (1.0 + x * x);
			}
		} // end of parallel region

		/* Reduce over the threads that actually ran (nthreads), not
		   the requested count j: the runtime may grant fewer. */
		for (full_sum = 0.0, i = 0; i < nthreads; i++)
			full_sum += sum[i][0];

		pi = step * full_sum;
		run_time = omp_get_wtime() - start_time;
		printf("\n pi is %f in %f seconds %d thrds \n", pi, run_time, nthreads);
	}
	return 0;
}
|
krb5_tgs_fmt_plug.c | /*
* Based on the work by Tim Medin
* Port from his Pythonscript to John by Michael Kramer (SySS GmbH)
*
* This software is
* Copyright (c) 2015 Michael Kramer <michael.kramer@uni-konstanz.de>,
* Copyright (c) 2015 magnum
* Copyright (c) 2016 Fist0urs <eddy.maaalou@gmail.com>
*
 * Modified by Fist0urs to improve performance by performing a known-plaintext
 * attack based on the defined ASN1 structures (thereby avoiding the full RC4
 * rounds + hmac-md5 in the common case)
*
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5tgs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5tgs);
#else
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#include "dyna_salt.h"
#include "rc4.h"
#include "md4.h"
#include "hmacmd5.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#define FORMAT_LABEL "krb5tgs"
#define FORMAT_NAME "Kerberos 5 TGS etype 23"
#define ALGORITHM_NAME "MD4 HMAC-MD5 RC4"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define MIN_PLAINTEXT_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(struct custom_salt *)
#define SALT_ALIGN sizeof(struct custom_salt *)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/*
assuming checksum == edata1
formats are:
checksum$edata2
$krb5tgs$23$checksum$edata2
$krb5tgs$23$*user*realm*spn*$checksum$edata2
*/
static struct fmt_tests tests[] = {
{"74809c4c83c3c8279c6058d2f206ec2f$78b4bbd4d229487d5afc9a6050d4144ce10e9245cdfc0df542879814ce740cebb970ee820677041596d7e55836a18cc95c04169e7c74a4a22ae94e66f3d37150e26cc9cb99e189ef54feb7a40a8db2cb2c41db80d8927c74da7b33b52c58742d2109036b8ab27184609e7adff27b8f17b2f2a7b7d85e4ad532d8a70d48685a4390a9fc7a0ab47fd17334534d795abf83462f0db3de931c6a2d5988ab5bf3253facfff1381afb192ce385511c9052f2915ffdb7ea28a1bbad0573d9071e79dc15068527d50100de8813793a15c292f145fa3797ba86f373a4f0a05e5f2ec7dbfd8c8b5139cc7fbb098ea1dd91a7440134ffe2aff7174d0df13dcad82c81c680a70127a3ec8792bdecd74a878f97ff2b21277dc8c9a2f7bbcd9f72560dd933d85585259067d45a46a6f505d03f188b62c37edf03f117503a26743ebd674d5b07324c15fc8418881613b365402e0176da97d43cf85e8239b69aee07791233a959bcaf83a7f492fa718dd0a1747eaf5ce626eb11bda89e8235a056e2721f45c3b61442d893ef32a8c192ea0dadb853f3c6f3c75e92f23c744605c6f55578f696b0f33a9586b8aae3e12e38a097692cd9a31d780d973eaaf62ef23b2fc9ae59a38bfd8ea14d3289b46910f61a90aa733e66382bc27f40ba634e55ef1bec0ca7f71546b79566d85664b92f9fae495fcef5cde4c4399a6798569a7e81b9cc4bdde7104f3fe181401f82bba944e3b0a406c7093c00ff9d5984a82517b1a64a8aa561bc1f0cbafbdbbc5654d375c91d4e485e17bb06838109fbc1504147481c91652f545086a84daa423a6286ea6bb13460c5ff3d865a7b37b9ce4e7b07fbe2f6897c12c1e4df2e875c1ec9cfbf84097a7f48b270baf3481263b21849ab93c231490d06a23461a5e00c23df76bca8e5a19256d859304e1f5752bf055ac7f4843e1ad174f1cbbf5c142958f9310025ce439d5979982fb0b8c2ea95e1a22ee8dc63423d9d364cb0b95bcdf89ec4ed485b9005326d728757d77aa3e020a4a61d7deb782bc5264dca350173609772cd6d003ee8104dd24d310c9a18a44f78e27d65095f5bb54f6118c8f0d79ad5a850cec8d40a19bd0134144e904c9eb7fdcff3293696071fc1118f6b2f934281a25bcd5ca7d567714b1e43bd6d09bfcc8744c0ca273a75938394ac2fb31957287093346078577c94a71dfa6ad4a63211f54f00ef7a9064d070aaff84116ee891728915c938a8a32e87aaa00ec18e2b4e9ae88f7e53f08d855052a995f92351be32d8df934eab487103b0f089828e5fb5f73af3a8a05b9fffd25c43a392994743de3de1a2a9b8bba27e02ae2341f09d63aafab291759c41b9635521ca02f08e21e7e5c3ce75c8c3515eaa99aeb9bf8e2
04663e8b6b8507ecf87230a131687b4770e250ba0ef29fa3ca3b47b34971e17c6a3ef785acdd7c90da9d2", "test123"},
{"$krb5tgs$23$ee09e292a05e3af870417758b1cdfd04$a1a480b8505d2f2f0ff06ddce40c2f6e76bd06fa64dcd5b0646a68effcd686b2e41562ebda90da7e7b36d95cd16ca8a33b8d99184d6b7fa7a2efec3a05dcb63b3e815ffd38849dc69174d1efb3a871544b73a6da55d2331bd4b60743d1654873e3c1748ce155c35a1711695296ab944d158e374b67f43dd07eab2bcacec1be480e5c1338e3834f7133909f5c7970ece39e73bd96d40f696cb5a8575e5e1feab937b616d6180cc3258e22b9fc495017593e15fc10e674af8184c282a0d80902ea9dabda5fb0a56d7980bfd4b62b330155cd8e318dc5be55500cb8ddd691b629af371463c411f1c11d21811e1546477b85f0a85e296f5df737930aff5015111d2f01a236ab7c77e9dab001f52400cccbcdb31bb180db027bd0fa2f6000dce7c1e072c0effbdee23a401720b1fe54a09a75430497f42f6e047d62d1123866d6ed37e58f8e2c1e462acb1a97a44a5ccef49897af190a46b3ab057d18c1e47d717c7a63658357d58d9cd5b7672f0a946f95f6e2ec3aee549e20e3b11237ea59f87723f24e03a6fac9e51086bc84142631ed36ee6855920f3d3d1e85d0faaa0a8b04a2b050b17f94d44af7f48302fa70dcf43279415983924e5d874c59722b6fb87ad1006fcb51e4341bb2cc4caf8c4b7993269af219cf4efa12b1009961c22f123c35f982e4ca75a97cd37f7f16be111ad301637ffb1664ccb021d3cf6bf771e07dc42202dac079c6bd7559f8e7a939bc14e9ddb45fe1b88c5f83b1ff966342bb9211afd15772cf5f871d39d0b30776d51d84b046df30d250c1877d146047e784c4bc2e6745f357dd0b1c6aaa11e26a0e3c2772781695f6a3bc536ba19e2327ec8c0866bd78d3b5b067abcf6991eafc8b7a11ad4049711263f3c68b358f246da1308d5a0daac1d7efedbc237be3d6a4bafe5ce66e941f7227d2b869bda637dfd223a4546340c59e7d0e2b58f60a67590468a53a5d28cc35cec36a9c5610c70c0633767539640b42cff787f4782057ff70d0e64658413429347f5449c1360da4d0827c4197bbb0361c6d0e04bcaf6bba1233912f806772146c0e778ac06749bbd3d8819007d070ae912580ff11a41f71b0907b88fb585585ebe42b4cc4ecde8ff7b49a856dd70f316425e53feff3ee6ca1e44d9ba5e607a41cf26edf44bffe2796f94ea2d767fbf81f665a7fedf0291e76c6fa409dc99c56954f21edc77f6173c5a3a909c8756f3cc5cc6c2d2e405f333ee0b50284aacfb81f9dfc6058b78b282db4511580eb623dc393919befc250d224490936e5fb16c483f4bd00c8915288d0ddf3812eaa3d46ad5a24c56390076730d23b2de6558ddadddba725f9b4a73d13de3e1276fc285194e3a2f613d9b020d0485d7
e26b36b7b917f4911024127320627066fabbd465b4cd5d5fdebae804d15db0f5b276659364bec32a13a8d9e11349f54bd", "bluvshop2"},
{"$krb5tgs$23$*Fist0urs$MYDOMAIN$cifs/panda.com*$423cb47a258e5859c13381ae64de7464$8dd47d94e288a1b32af726d2eac33710fb1610e4c6f674907d7a74d26515a314173b2b531baa790b70467ebe538fc9e941bf4d7f7218a4ec17c1dc963b717d5837fcd5ae678189101a1b4831a53a1322ca6e8f5d644e4aa72e99bedb4a0e967c3e05ccdcc96137265612969a1214a71038dea845250cac45551963fe85f193d88aa39ed57b95b934295e17de04ebf0ad275df67f65fb1fc2ee3095c6af02c4c1b8efa570e1c2ac562601c5ac89bd6f59ca8b957660aa00787d4a0f9d9f29b15eb3b85823f7c9814eab9106210c37d863cf8413391c5941a994fdd52a44e4f8e8e4c9b8b520e62015fb5ed40e91e7a453b3ddcefb888fd896c187993a899b6a30d27a5b2b7847a410c0cce8b0fcf90367bfd8e6dfa7eb37676ecdf500c9a51ffb59792c13e222371e024f857134b7039931daa66a6902da37e71c41adf83846a9df1e75575696d7a6f1744d48e8215849773903c9475c29a1ec0fcc11257f9479467c2b65679a3da298e6806d619794dfc06b10b5e0a46e395c3ade3d750292f244cabb7172d83dbd42c6e3bd5a93a8c2d5fe84b23a3c60508733f5a087763f2fa514d18f891461b8ea22f7eaa847906182bd0415c28d197c06df8449cc2c6c2016c38672a67613a14ccac9025c4da85fc0825dcd9a1269e6064f80c0de445fbdd237d35ab0eb6ae468413c5b17c9955a8c8c34952c8a188bad7e5b18651a75b1c46cf116422378a94a19c31dfa634c8ab15f4f13e7e427741ab9e8f247b4a8fe2562986ee21f602b4fad45bd535718020b764da6f346e3b028db8a1af88419f3ea9141fcf0c622ed40d894814e5d60a9dcdfc8344f802c7b2f0089131e57ac0cc071af13c3b2b7302e9df4665c48b91f4ef0bb2a60a272e5841e0ee8da01a91773d41f295514b65ccb2190195f720d9838b3e7c701b51e813ef0262fbdbbe06391ba3fe4232e74523dfa933e6d3df2494ddd9f254afdf97623ceb5d32483a870cf72a57617bdbf97f0420c041edb5a884ff401dc21da0472d7a75d89dc9937fd65c3a422063ea44e3954435d38b8f34cec2c0360c8bef392f77fbab76a7b801e05b467d4980d20f0a7dbc1c39f50ce4429df1ec167c6be67d2fbd507a3f7b5d98cf214ae0510fac51e1075a06250d65a3a1179486bda5d982b7904682835079e3042f39a582492cd14dbafb5826e242c81998752043e2dd91b648f115900595f5191a01f187c4b6dea4917e4773a5fb28cb1d20508142a3905068c931a8c9a8fa291b92f8ece9884affd8787a5aa11858274879160e930587f3c32e2cabbd124c708641df09f82d05ab4db157ad24931dc36c616dbb778762ead6a8491ce8a
48037106d382283ac69422c04af3ae2cbe22eff6dff21bc34b154a5fab666870c59aba65bd4e0ea0be3f394bb4901fd64a0e19293b8026188615c84601b7fecdb62b", "jlcirr."},
// 1-40a00000-john@HTTP-adc1.toor.com-TOOR.COM.kirbi file
{"$krb5tgs$23$e43ae991b15bbd11c19395c6c785f4d4$07ea84f4cf5ab2ad5a1a15c5776e7bc024d26451771e653c9cb0b87d8a5d73317f992913621a61039d143818585aee976b5273f53023d28a1da22c8a2f79e47956da4221bd10809fb777b4684cbbc102bda46dc816eb5a5315196f1b2cd47fee6ddc1adae753c96eefe77bf8e8e54e33489595f0c3cb47db9bef77438f666c15de4ee9893839c5280daebd81d476a00944f8282eed61af43578fc6f68dbb47ad9106ea1f58125355506016ccf997d35d8ccad169ba7eebe27e76d19188a227158172b405c7e053da1e3bafae4cd39594e7a03e7a96bdbc63a793fba6c26135d6d1789395f0155341e04f80097540ffb1f299f61960a34db3ea14b95b4633b7eea3a552140e7e42708009fdda3d1b42b3297142bfc036abd3d28f07ba1c8362e1c5b346f55af7214314a92fa412733825f55fe4a56b56859af00eb4f69cc7ad339b7bc8032ff1057be3e73c5533f4f546e599ecbf60305569c9b87b22971ef012ff92f582688b001ad23901dae743c46cae6603f7b6b88db78fcfd59997e8a1078f8a27e28a6628bc59d78674d9d16a6413da369ab58cb702dba01c710fbfed87f4665dfb3cc4a8f83ebf960435ae96973e699cd419324ddf115825c99890b2bb8e35ce0005a2adf95ce691b135358c63aa87088ed615c5a9667927e691bf7135677893abc41c038d25ff9091c14e3d1da85c7f0edaed32c9b3b56d2c473b2363b93aae5cc9b02db47e7a22a639a951e2edce7580f691c2ee0f8ebdfb02cdc6de8d1028e34085d1a69cdebb00a430b5ddce223bd1cc9c66b746a46584c098f051b97180ee8db4268a3a838504884df45227cac6fe9e73704877f04558c9640ac2ed33b3216b2e17805863a955285f4633407097f439d7063faeacdcee0d6d4e6c2adbe85df0e51eb3c08da1cedb4fa70ff74b2711a7294b499597c1f30c1dd3cc12751692311a16e22b3fa6af75eb0ace4170df497ba860445b1fc964771eafc034515918bb080a6d05ab1170708e6ce80bf9b00f808a2b814e89d0ac9b5d1a23686b48e99fdc50c71b5fef8a9bfc851e40bed59f69821109be0119151768e4d91b8b00c46b39af207ad4a2566ce7751ac124c3c5851cd1026052d34988272bf2851bd1a4536816a7635d83c1378b442eb04c15d5028763e0b189c8f45703c54d62aaea570c9e56b0e721d170cda74f91a4101c495fb565bb03f2ad635335c88db112dfb073bb4d1547de3214de5e371bfe9b440de3882f7b83593ca0fc60f4e6e2e3885b2a365a56b529904c74bc58ab38432f0dfbbd3f4d543f9d8685b0aa69aa807701e09e1253b6ed4948c7ceaaafdd0baed2663881d52a163101a5bb697a65b2bfcc54d0dd", "1qaz@WSX"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*saved_K1)[16];
static int any_cracked, *cracked;
static size_t cracked_size;
static int new_keys;
/*
 * Per-hash salt: the RC4-HMAC checksum (edata1) plus the variable-length
 * encrypted ticket part (edata2). Wrapped in JtR's dyna_salt machinery so
 * salts carrying different edata2 lengths hash/compare correctly.
 */
static struct custom_salt {
	dyna_salt dsalt;          /* must be first: dyna_salt bookkeeping */
	unsigned char edata1[16]; /* HMAC-MD5 checksum of the ticket */
	uint32_t edata2len;       /* length of edata2 in bytes */
	unsigned char* edata2;    /* RC4-encrypted ticket data */
} *cur_salt;
/*
 * Canonicalize a ciphertext line: prepend the "$krb5tgs$23$" tag when it
 * is missing and lower-case the whole string so hex digits compare
 * consistently. Storage comes from mem_alloc_tiny and is never freed.
 *
 * Fixes vs. original: the copy loop used a signed `int i` compared against
 * strlen() (signed/unsigned mismatch) and re-evaluated strlen() on every
 * iteration (accidental O(n^2)); the needless `static` on the locals is
 * also gone.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	char *ptr, *keeptr;
	size_t i, len;

	len = strlen(ciphertext);
	ptr = mem_alloc_tiny(len + 12 + 1, MEM_ALIGN_NONE);
	keeptr = ptr;

	if (strncmp(ciphertext, "$krb5tgs$23$", 12) != 0) {
		memcpy(ptr, "$krb5tgs$23$", 12);
		ptr += 12;
	}
	/* Copy including the terminating NUL (hence len + 1). NOTE(review):
	 * this also lower-cases any "*user*realm*spn*" account info, not
	 * just the hex part — preserved from the original behavior. */
	for (i = 0; i < len + 1; i++)
		ptr[i] = tolower(ARCH_INDEX(ciphertext[i]));
	return keeptr;
}
/*
 * Quick syntax check of one candidate ciphertext line.
 * Accepted forms (see the comment above tests[]):
 *   checksum$edata2
 *   $krb5tgs$23$checksum$edata2
 *   $krb5tgs$23$*user*realm*spn*$checksum$edata2
 * Returns 1 when the line looks parseable, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;

	/* work on a copy: strtokm() mutates its buffer */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (strncmp(ciphertext, "$krb5tgs$23$", 12) == 0) {
		/* handle 'chopped' .pot lines (they always have the tag!) */
		if (ldr_isa_pot_source(ciphertext)) {
			MEM_FREE(keeptr);
			return 1;
		}
		ctcopy += 12;
		if (ctcopy[0] == '*') { /* assume account's info provided */
			ctcopy++;
			/* token runs to the closing '*'; strlen(p) + 2 jumps
			   over the token, the '*' and the following '$' */
			p = strtokm(ctcopy, "*");
			ctcopy += strlen(p) + 2; /* set after '$' */
			goto edata;
		}
		if (ctcopy[0] == '$')
			ctcopy++;
	}
	/* untagged lines fall through to here as well */
edata:
	/* assume checksum: must be exactly 32 hex chars (16 bytes) */
	if (((p = strtokm(ctcopy, "$")) == NULL) || strlen(p) != 32)
		goto err;
	/* assume edata2 following */
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * One-time setup: scale the keys-per-crypt window for OpenMP and allocate
 * the per-candidate buffers (freed again in done()).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_alloc_align(sizeof(*saved_key) *
	                            self->params.max_keys_per_crypt,
	                            MEM_ALIGN_CACHE);
	saved_K1 = mem_alloc_align(sizeof(*saved_K1) *
	                           self->params.max_keys_per_crypt,
	                           MEM_ALIGN_CACHE);
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
	any_cracked = 0;
}
/* Release every buffer allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_K1);
	MEM_FREE(saved_key);
}
/*
 * Parse a (canonicalized) ciphertext into a struct custom_salt.
 * Skips the tag and optional "*user*realm*spn*$" account info, decodes the
 * 32-hex-char checksum into edata1, then the rest of the line into a
 * freshly allocated edata2. Returns a pointer-to-pointer as required by
 * the dyna_salt convention.
 */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct custom_salt cs;
	char *p;
	char *ctcopy;
	char *keeptr;
	static void *ptr;

	/* work on a copy: strtokm() mutates its buffer */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	memset(&cs, 0, sizeof(cs));
	cs.edata2 = NULL;
	if (strncmp(ciphertext, "$krb5tgs$23$", 12) == 0) {
		ctcopy += 12;
		if (ctcopy[0] == '*') {
			/* skip "*user$realm$spn*$" account info (same walk
			   as in valid()) */
			ctcopy++;
			p = strtokm(ctcopy, "*");
			ctcopy += strlen(p) + 2;
			goto edata;
		}
		if (ctcopy[0]=='$')
			ctcopy++;
	}
edata:
	if (((p = strtokm(ctcopy, "$")) != NULL) && strlen(p) == 32) { /* assume checksum */
		/* decode 32 hex chars into the 16-byte checksum */
		for (i = 0; i < 16; i++) {
			cs.edata1[i] =
			    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			    atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* skip '$' */
		p += strlen(p) + 1;
		/* retrieve non-constant length of edata2 */
		for (i = 0; p[i] != '\0'; i++)
			;
		cs.edata2len = i/2;
		cs.edata2 = (unsigned char*) mem_calloc_tiny(cs.edata2len + 1, sizeof(char));
		for (i = 0; i < cs.edata2len; i++) { /* assume edata2 */
			cs.edata2[i] =
			    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			    atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
	}
	MEM_FREE(keeptr);
	/* following is used to fool dyna_salt stuff */
	cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, edata1);
	cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, edata1, edata2len, 0);
	cs.dsalt.salt_alloc_needs_free = 0;
	/* hand back a pointer to a heap copy; set_salt() dereferences once */
	ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	memcpy(ptr, &cs, sizeof(struct custom_salt));
	return (void *) &ptr;
}
/* Install the current salt; dyna_salt hands us a pointer-to-pointer. */
static void set_salt(void *salt)
{
	struct custom_salt **s = (struct custom_salt **) salt;

	cur_salt = *s;
}
/*
 * Store one candidate password and flag that K1 values must be recomputed.
 *
 * Fix: the copy is now bounded by the destination buffer size
 * (PLAINTEXT_LENGTH + 1). The original passed strlen(key) + 1 as the
 * limit, which bounds by the SOURCE length and so defeats strnzcpy's
 * truncation, overflowing saved_key[index] for over-long candidates.
 */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
	new_keys = 1;
}
/* Return the stored (possibly truncated) candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Main cracking loop. For each candidate:
 *   K1 = HMAC-MD5(MD4(UTF-16LE(password)), msg-type) is computed once per
 *   key batch (new_keys), then K3 = HMAC-MD5(K1, checksum) keys RC4.
 * Only the first 32 bytes are decrypted at first; the decrypted ASN.1
 * header acts as a known-plaintext filter, so the full decryption + HMAC
 * verification only runs for candidates that pass it.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	/* RC4-HMAC message type 2, little-endian (HMAC-MD5 "salt") */
	const unsigned char data[4] = {2, 0, 0, 0};
	int index;

	/* reset results from the previous salt/batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char K3[16];
#ifdef _MSC_VER
		/* MSVC has no VLAs: fixed worst-case buffer */
		unsigned char ddata[65536];
#else
		unsigned char ddata[cur_salt->edata2len + 1];
#endif
		unsigned char checksum[16];
		RC4_KEY rckey;

		if (new_keys) {
			MD4_CTX ctx;
			unsigned char key[16];
			UTF16 wkey[PLAINTEXT_LENGTH + 1];
			int len;

			len = enc_to_utf16(wkey, PLAINTEXT_LENGTH,
					(UTF8*)saved_key[index],
					strlen(saved_key[index]));
			if (len <= 0) {
				/* invalid encoding: a negative return flags the
				   offset of the bad byte; truncate there */
				saved_key[index][-len] = 0;
				len = strlen16(wkey);
			}
			/* NT hash of the password... */
			MD4_Init(&ctx);
			MD4_Update(&ctx, (char*)wkey, 2 * len);
			MD4_Final(key, &ctx);
			/* ...keys the per-candidate K1 */
			hmac_md5(key, data, 4, saved_K1[index]);
		}
		hmac_md5(saved_K1[index], cur_salt->edata1, 16, K3);
		RC4_set_key(&rckey, 16, K3);
		/* decrypt only the first 32 bytes for the cheap filter below */
		RC4(&rckey, 32, cur_salt->edata2, ddata);
		/*
			8 first bytes are nonce, then ASN1 structures
			(DER encoding: type-length-data)

			if length >= 128 bytes:
				length is on 2 bytes and type is
				\x63\x82 (encode_krb5_enc_tkt_part)
				and data is an ASN1 sequence \x30\x82

			else:
				length is on 1 byte and type is \x63\x81
				and data is an ASN1 sequence \x30\x81

			next headers follow the same ASN1 "type-length-data" scheme
		*/
		if (((!memcmp(ddata + 8, "\x63\x82", 2)) && (!memcmp(ddata + 16, "\xA0\x07\x03\x05", 4)))
			||
			((!memcmp(ddata + 8, "\x63\x81", 2)) && (!memcmp(ddata + 16, "\x03\x05\x00", 3)))) {
			/* check the checksum to be sure */
			RC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);
			hmac_md5(saved_K1[index], ddata, cur_salt->edata2len, checksum);
			if (!memcmp(checksum, cur_salt->edata1, 16)) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}
	new_keys = 0;
	return *pcount;
}
/* Comparison is already done in crypt_all(); binary/count are unused. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate result recorded by crypt_all(); binary is unused. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full HMAC verification already happened in crypt_all(); just report. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/* Format descriptor wiring this implementation into the JtR core. */
struct fmt_main fmt_krb5tgs = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		MIN_PLAINTEXT_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT,
		{NULL}, /* no tunable cost descriptions */
		tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary, /* BINARY_SIZE is 0: matching is in crypt_all */
		get_salt,
		{NULL}, /* no tunable cost functions */
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_dyna_salt_hash, /* dyna_salt-aware salt hash */
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
TRPO_CG_FPGA.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
#include "Maxfiles.h"
#include "MaxSLiCInterface.h"
double CG_FPGA (TRPOparam param, double *Result, double *b, size_t MaxIter, double ResidualTh, size_t NumThreads){
//////////////////// Conjugate Gradient ////////////////////
// This function implements Conjugate Gradient algorithm to solve linear equation Ax=b
// Result: The Conjugate Gradient Result, i.e. solution x to Ax=b
// b: Vector b in the equation Ax=b
// MaxIter: Maximum Iterations of Conjugate Gradient (in modular_rl is 10)
// ResidualTh: Threshold of Residual (in modular_rl is 1e-10)
// NumThreads: Number of Threads to use
//////////////////// Parameters ////////////////////
// OpenMP Settings
omp_set_num_threads(NumThreads);
// Assign Parameters - For CPU and FPGA
const size_t NumLayers = param.NumLayers;
size_t * LayerSize = param.LayerSize;
const size_t NumSamples = param.NumSamples;
char * ModelFile = param.ModelFile;
char * DataFile = param.DataFile;
const double CG_Damping = param.CG_Damping;
const size_t NumParams = NumParamsCalc(LayerSize, NumLayers);
// Assign Parameters - For FPGA Only
size_t * PaddedLayerSize = param.PaddedLayerSize;
size_t * NumBlocks = param.NumBlocks;
// Dimension of Observation Space and Action Space
const size_t ObservSpaceDim = LayerSize[0];
const size_t ActionSpaceDim = LayerSize[NumLayers-1];
// Calculate BlockDim
size_t * BlockDim = (size_t *) calloc(NumLayers, sizeof(size_t));
for (int i=0; i<NumLayers; ++i) BlockDim[i] = PaddedLayerSize[i] / NumBlocks[i];
// Length of Weight and VWeight Initialisation Vector
int WeightInitVecLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
WeightInitVecLength += 2 * BlockDim[i] * PaddedLayerSize[i+1];
}
// Number of Cycles to Run on FPGA - Pipelined Forward and Back Propagation
// Remarks: Here we assume 4 layers
size_t MaxBlkDim0Dim2 = (BlockDim[0]>BlockDim[2]) ? BlockDim[0] : BlockDim[2];
size_t FwdCyclesPerSample = BlockDim[0] + (BlockDim[1]-1)*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t BwdCyclesPerSample = BlockDim[1]*MaxBlkDim0Dim2 + BlockDim[2]*BlockDim[3];
size_t CyclesPerSample = (FwdCyclesPerSample>BwdCyclesPerSample) ? FwdCyclesPerSample : BwdCyclesPerSample;
size_t PropCyclesTotal = CyclesPerSample * (NumSamples + 1);
// Number of Cycles to Run on FPGA - Read Result Back
size_t FVPLength = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
FVPLength += PaddedLayerSize[i] * PaddedLayerSize[i+1];
FVPLength += PaddedLayerSize[i+1];
}
int PaddedFVPLength = ((int)ceil((double)FVPLength/2))*2;
// Number of Cycles to Run on FPGA for Each FVP Computation - Total
size_t NumTicks = WeightInitVecLength + PropCyclesTotal + PaddedFVPLength + 20;
// Allocation Memory Space for FVP Result
double * FVPResult = (double *) calloc(PaddedFVPLength, sizeof(double));
// iterator when traversing through input vector and result vector
size_t pos;
//////////////////// Memory Allocation - Neural Network ////////////////////
double * p = (double *) calloc(NumParams, sizeof(double));
double * r = (double *) calloc(NumParams, sizeof(double));
double * x = (double *) calloc(NumParams, sizeof(double));
double * z = (double *) calloc(NumParams, sizeof(double));
//////////////////// Memory Allocation - Neural Network ////////////////////
// W[i]: Weight Matrix from Layer[i] to Layer[i+1]
// B[i]: Bias Vector from Layer[i] to Layer[i+1]
// Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
// Item B[k] is the bias of Neuron #k in Layer[i+1]
double * W [NumLayers-1];
double * B [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
//////////////////// Memory Allocation - Input Vector ////////////////////
// The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
// There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
// As a result, the shape of the Input Vector is the same as that of the parameters in the model
// The only difference is that the Input Vector is stored in a flattened manner
// There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
double * VW [NumLayers-1];
double * VB [NumLayers-1];
for (size_t i=0; i<NumLayers-1; ++i) {
VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
}
// Allocate Memory for Input Vector corresponding to LogStd
double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));
//////////////////// Memory Allocation - Simulation Data ////////////////////
// Allocate Memory for Observation and Probability Mean
// Observ: list of observations - corresponds to ob_no in modular_rl
// Mean: list of probablity mean values - corresponds to the 'mean' part of prob_np in modular_rl
// Remarks: due to the specific setting of the experienments in the TRPO paper,
// Std is the same for all samples in each simulation iteration,
// so we just allocate Std memory space for one sample and use it for all samples.
// The general case should be another vector of Std with size NumSamples*ActionSpaceDim
double * Observ = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
double * Mean = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Std = (double *) calloc(ActionSpaceDim, sizeof(double));
double * Action = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
double * Advantage = (double *) calloc(NumSamples, sizeof(double));
//////////////////// Load Neural Network ////////////////////
// Open Model File that contains Weights, Bias and std
FILE *ModelFilePointer = fopen(ModelFile, "r");
if (ModelFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
return -1;
}
// Read Weights and Bias from file
for (size_t i=0; i<NumLayers-1; ++i) {
// Reading Weights W[i]: from Layer[i] to Layer[i+1]
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
}
}
// Reading Bias B[i]: from Layer[i] to Layer[i+1]
for (size_t k=0; k<nextLayerDim; ++k) {
fscanf(ModelFilePointer, "%lf", &B[i][k]);
}
}
// Read LogStd from file
// Remarks: actually this LogStd will be overwritten by the Std from the datafile
for (size_t k=0; k<ActionSpaceDim; ++k) {
fscanf(ModelFilePointer, "%lf", &Std[k]);
}
// Close Model File
fclose(ModelFilePointer);
//////////////////// Load Vector b and Init Result Vector ////////////////////
// Initialisation - CG
double rdotr = 0;
for (size_t i=0; i<NumParams; ++i) {
p[i] = b[i];
r[i] = b[i];
rdotr += r[i] * r[i];
}
// Initialisation - FVP
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = b[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = b[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = b[pos];
pos++;
}
//////////////////// Load Simulation Data ////////////////////
// Open Data File that contains Mean, std and Observation
FILE *DataFilePointer = fopen(DataFile, "r");
if (DataFilePointer==NULL) {
fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
return -1;
}
// Read Mean, Std and Observation
// Remarks: Std is the same for all samples, and appears in every line in the data file
// so we are writing the same Std again and again to the same place.
for (size_t i=0; i<NumSamples; ++i) {
// Read Mean
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
}
// Read Std
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Std[j]);
}
// Read Observation
for (size_t j=0; j<ObservSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
}
// Read Action
for (size_t j=0; j<ActionSpaceDim; ++j) {
fscanf(DataFilePointer, "%lf", &Action[i*ActionSpaceDim+j]);
}
// Read Advantage
fscanf(DataFilePointer, "%lf", &Advantage[i]);
}
// Close Data File
fclose(DataFilePointer);
//////////////////// FPGA - Initialisation ////////////////////
// Load Maxfile and Engine
fprintf(stderr, "[INFO] Initialising FPGA...\n");
max_file_t* maxfile = TRPO_init();
max_engine_t* engine = max_load(maxfile, "*");
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
// Length of Observation Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ObservVecLength = WeightInitVecLength + NumSamples*BlockDim[0];
size_t ObservVecWidth = NumBlocks[0];
size_t ActualObservVecItems = ObservVecLength * ObservVecWidth;
size_t PaddedObservVecItems = (size_t) 48 * ceil( (double)ActualObservVecItems/48 );
fprintf(stderr, "[INFO] Observation Vector (%zu bytes) padded to %zu bytes\n", ActualObservVecItems*8, PaddedObservVecItems*8);
double * Observation = (double *) calloc(PaddedObservVecItems, sizeof(double));
// Length of DataP Vector
// Remarks: DRAM Write requires data bit-size to be a multiple of 384bytes
// Namely, the number of items must be a multiple of 48
size_t ActualDataPVecItems = WeightInitVecLength * NumBlocks[0];
size_t PaddedDataPVecItems = (size_t) 48 * ceil( (double)ActualDataPVecItems/48 );
fprintf(stderr, "[INFO] Vector P (%zu bytes) padded to %zu bytes\n", ActualDataPVecItems*8, PaddedDataPVecItems*8);
double * DataP = (double *) calloc(PaddedDataPVecItems, sizeof(double));
// Number of Ticks for each CG iteration
fprintf(stderr, "[INFO] In each iteration FPGA will run for %zu cycles.\n", NumTicks);
// Feed Weight and VWeight into Observation
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
Observation[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Feed actual observation data into Observation
for (size_t iter=0; iter<NumSamples; ++iter) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
if (RowNumPadded<RowNumLimit) Observation[RowNum*ObservVecWidth+X] = Observ[iter*ObservSpaceDim+RowNumPadded];
else Observation[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
// Length of BiasStd Vector
size_t BiasStdVecLength = PaddedLayerSize[NumLayers-1];
for (size_t i=1; i<NumLayers; ++i) {
BiasStdVecLength += 2*PaddedLayerSize[i];
}
double * BiasStd = (double *) calloc(BiasStdVecLength, sizeof(double));
// Feed Bias and VBias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = VB[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed (1/Std)^2 into BiasStd
for (size_t k=0; k<PaddedLayerSize[NumLayers-1]; ++k) {
size_t LayerDimLimit = LayerSize[NumLayers-1];
if (k<LayerDimLimit) BiasStd[RowNum] = 1.0 / Std[k] / Std[k];
else BiasStd[RowNum] = 0;
RowNum++;
}
// Init FPGA
fprintf(stderr, "[INFO] Loading Model and Simulation Data...\n");
TRPO_WriteDRAM_actions_t init_action;
init_action.param_start_bytes = 0;
init_action.param_size_bytes = PaddedObservVecItems * sizeof(double);
init_action.instream_fromCPU = Observation;
TRPO_WriteDRAM_run(engine, &init_action);
//////////////////// CG - Main Loop ////////////////////
// Measuring Total Time and Total Computing Time
double runtimeComp = 0;
struct timeval tv1, tv2;
struct timeval tv3, tv4;
// Iterative Solver
gettimeofday(&tv3, NULL);
for (size_t iter=0; iter<=MaxIter; ++iter) {
// Calculate Frobenius Norm of x
double FrobNorm = 0;
gettimeofday(&tv1, NULL);
#pragma omp parallel for reduction (+:FrobNorm)
for (size_t i=0; i<NumParams; ++i) {
FrobNorm += x[i] * x[i];
}
FrobNorm = sqrt(FrobNorm);
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", iter, rdotr, FrobNorm);
// Check Termination Condition
if (rdotr<ResidualTh || iter==MaxIter) {
for (size_t i=0; i<NumParams; ++i) Result[i] = x[i];
break;
}
//////////////////// FPGA - Load p ////////////////////
// Read p into VW, VB and VLogStd
pos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerDim = LayerSize[i];
size_t nextLayerDim = LayerSize[i+1];
for (size_t j=0; j<curLayerDim;++j) {
for (size_t k=0; k<nextLayerDim; ++k) {
VW[i][j*nextLayerDim+k] = p[pos];
pos++;
}
}
for (size_t k=0; k<nextLayerDim; ++k) {
VB[i][k] = p[pos];
pos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
VLogStd[k] = p[pos];
pos++;
}
// Feed VW, VB and VLogStd into DataP
size_t RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
// Parameters of current
size_t InBlockDim = BlockDim[ID];
size_t NumInBlocks = NumBlocks[ID];
size_t OutBlockDim = BlockDim[ID+1];
size_t NumOutBlocks = NumBlocks[ID+1];
size_t OutLayerSize = LayerSize[ID+1];
// Feed Weight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = W[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
// Feed VWeight of Layer[ID]
for (size_t Y=0; Y<NumOutBlocks; ++Y) {
for (size_t addrX=0; addrX<InBlockDim; ++addrX) {
for (size_t addrY=0; addrY<OutBlockDim; ++addrY) {
for (size_t X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[ID];
size_t ColNumPadded = Y*OutBlockDim + addrY;
size_t ColNumLimit = LayerSize[ID+1];
if ( (RowNumPadded < RowNumLimit) && (ColNumPadded < ColNumLimit) ) {
DataP[RowNum*ObservVecWidth+X] = VW[ID][RowNumPadded*OutLayerSize + ColNumPadded];
}
else DataP[RowNum*ObservVecWidth+X] = 0;
}
RowNum++;
}
}
}
}
// Pad actual observation data into DataP
bool isPadding = true;
for (size_t iter=0; iter<NumSamples && isPadding; ++iter) {
size_t InBlockDim = BlockDim[0];
size_t NumInBlocks = NumBlocks[0];
for (int addrX=0; addrX<InBlockDim && isPadding; ++addrX) {
for (int X=0; X<NumInBlocks; ++X) {
size_t RowNumPadded = X*InBlockDim + addrX;
size_t RowNumLimit = LayerSize[0];
size_t posDataP = RowNum*ObservVecWidth+X;
if (posDataP<PaddedDataPVecItems) {
if (RowNumPadded<RowNumLimit) DataP[posDataP] = Observ[iter*ObservSpaceDim+RowNumPadded];
else DataP[posDataP] = 0;
}
else {
isPadding = false;
break;
}
}
RowNum++;
}
}
// Feed Bias and VBias into BiasStd
RowNum = 0;
for (size_t ID=0; ID<NumLayers-1; ++ID) {
size_t nextLayerDim = PaddedLayerSize[ID+1];
size_t nextLayerDimLimit = LayerSize[ID+1];
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = B[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
for (size_t k=0; k<nextLayerDim; ++k) {
if (k<nextLayerDimLimit) BiasStd[RowNum] = VB[ID][k];
else BiasStd[RowNum] = 0;
RowNum++;
}
}
// Feed DataP to BRAM
TRPO_WriteDRAM_actions_t write_action;
write_action.param_start_bytes = 0;
write_action.param_size_bytes = PaddedDataPVecItems * sizeof(double);
write_action.instream_fromCPU = DataP;
TRPO_WriteDRAM_run(engine, &write_action);
//////////////////// FPGA - Calc z = FIM*p ////////////////////
// Init Advanced Static Interface
TRPO_Run_actions_t run_action;
run_action.param_NumSamples = NumSamples;
run_action.param_PaddedObservVecItems = PaddedObservVecItems;
run_action.instream_BiasStd = BiasStd;
run_action.outstream_FVP = FVPResult;
// Run DFE and Measure Elapsed Time
gettimeofday(&tv1, NULL);
TRPO_Run_run(engine, &run_action);
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
// Read FVP into z
pos = 0;
size_t FVPPos = 0;
for (size_t i=0; i<NumLayers-1; ++i) {
size_t curLayerSizePadded = PaddedLayerSize[i];
size_t nextLayerSizePadded = PaddedLayerSize[i+1];
size_t curLayerSizeReal = LayerSize[i];
size_t nextLayerSizeReal = LayerSize[i+1];
for (size_t j=0; j<curLayerSizePadded; ++j) {
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if ( (j<curLayerSizeReal) && (k<nextLayerSizeReal) ) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<nextLayerSizePadded; ++k) {
if (k<nextLayerSizeReal) {
z[pos] = FVPResult[FVPPos];
pos++;
}
FVPPos++;
}
}
for (size_t k=0; k<ActionSpaceDim; ++k) {
z[pos] = 2 * NumSamples * VLogStd[k];
pos++;
}
gettimeofday(&tv1, NULL);
// Averaging Fisher Vector Product over the samples and apply CG Damping
#pragma omp parallel for
for (size_t i=0; i<pos; ++i) {
z[i] = z[i] / (double)NumSamples;
z[i] += CG_Damping * p[i];
}
//////////////////// FPGA - End ////////////////////
// Update x and r
double pdotz = 0;
#pragma omp parallel for reduction (+:pdotz)
for (size_t i=0; i<NumParams; ++i) {
pdotz += p[i] * z[i];
}
double v = rdotr / pdotz;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
x[i] += v * p[i];
r[i] -= v * z[i];
}
// Update p
double newrdotr = 0;
#pragma omp parallel for reduction (+:newrdotr)
for (size_t i=0; i<NumParams; ++i) {
newrdotr += r[i] * r[i];
}
double mu = newrdotr / rdotr;
#pragma omp parallel for
for (size_t i=0; i<NumParams; ++i) {
p[i] = r[i] + mu * p[i];
}
// Update rdotr
rdotr = newrdotr;
gettimeofday(&tv2, NULL);
runtimeComp += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
}
gettimeofday(&tv4, NULL);
double runtimeTotal = ((tv4.tv_sec-tv3.tv_sec) * (double)1E6 + (tv4.tv_usec-tv3.tv_usec)) / (double)1E6;
fprintf(stderr, "[INFO] Total Time for FPGA is %f seconds. Pure Computing Time is %f seconds.\n", runtimeTotal, runtimeComp);
//////////////////// Clean Up ////////////////////
fprintf(stderr, "[INFO] Clean up...\n");
// Free Engine and Maxfile
max_unload(engine);
TRPO_free();
// Free Memories Allocated for Reading Files
for (size_t i=0; i<NumLayers-1; ++i) {
free(W[i]); free(VW[i]);
free(B[i]); free(VB[i]);
}
free(Observ); free(Mean); free(Std); free(Action); free(Advantage); free(VLogStd);
// Free Memories Allocated for DFE
free(Observation); free(BiasStd); free(FVPResult);
// Free Memories Allocated for CG
free(p); free(r); free(x); free(z); free(DataP);
return runtimeComp;
}
|
stencil.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: Stencil
PURPOSE: This program tests the efficiency with which a space-invariant,
linear, symmetric filter (stencil) can be applied to a square
grid or image.
USAGE: The program takes as input the linear dimension of the grid,
and the number of iterations on the grid
<progname> <#threads><# iterations> <grid size>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than MPI or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
HISTORY: - Written by Rob Van der Wijngaart, November 2006.
- RvdW, August 2013: Removed unrolling pragmas for clarity;
fixed bug in compuation of width of strip assigned to
each rank;
- RvdW, August 2013: added constant to array "in" at end of
each iteration to force refreshing of neighbor data in
parallel versions
- RvdW, October 2014: introduced 2D domain decomposition
- RvdW, October 2014: removed barrier at start of each iteration
- RvdW, October 2014: replaced single rank/single iteration timing
with global timing of all iterations across all ranks
*********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_mpiomp.h>
#if DOUBLE
#define DTYPE double
#define MPI_DTYPE MPI_DOUBLE
#define EPSILON 1.e-8
#define COEFX 1.0
#define COEFY 1.0
#define FSTR "%lf"
#else
#define DTYPE float
#define MPI_DTYPE MPI_FLOAT
#define EPSILON 0.0001f
#define COEFX 1.0f
#define COEFY 1.0f
#define FSTR "%f"
#endif
/* define shorthand for indexing multi-dimensional arrays with offsets */
#define INDEXIN(i,j) (i+RADIUS+(long)(j+RADIUS)*(long)(width+2*RADIUS))
/* need to add offset of RADIUS to j to account for ghost points */
#define IN(i,j) in[INDEXIN(i-istart,j-jstart)]
#define INDEXOUT(i,j) (i+(j)*(width))
#define OUT(i,j) out[INDEXOUT(i-istart,j-jstart)]
#define WEIGHT(ii,jj) weight[ii+RADIUS][jj+RADIUS]
int main(int argc, char ** argv) {

  int    Num_procs;       /* number of ranks                                    */
  int    Num_procsx, Num_procsy; /* number of ranks in each coord direction     */
  int    my_ID;           /* MPI rank                                           */
  int    my_IDx, my_IDy;  /* coordinates of rank in rank grid                   */
  int    right_nbr;       /* global rank of right neighboring tile              */
  int    left_nbr;        /* global rank of left neighboring tile               */
  int    top_nbr;         /* global rank of top neighboring tile                */
  int    bottom_nbr;      /* global rank of bottom neighboring tile             */
  DTYPE *top_buf_out;     /* communication buffer                               */
  DTYPE *top_buf_in;      /*       "         "                                  */
  DTYPE *bottom_buf_out;  /*       "         "                                  */
  DTYPE *bottom_buf_in;   /*       "         "                                  */
  DTYPE *right_buf_out;   /*       "         "                                  */
  DTYPE *right_buf_in;    /*       "         "                                  */
  DTYPE *left_buf_out;    /*       "         "                                  */
  DTYPE *left_buf_in;     /*       "         "                                  */
  int    root = 0;
  int    n, width, height;/* linear global and local grid dimension             */
  long   nsquare;         /* total number of grid points                        */
  /* NOTE(review): it and jt are declared but never used in this function       */
  int    i, j, ii, jj, kk, it, jt, iter, leftover; /* dummies                   */
  int    istart, iend;    /* bounds of grid tile assigned to calling rank       */
  int    jstart, jend;    /* bounds of grid tile assigned to calling rank       */
  DTYPE  norm,            /* L1 norm of solution                                */
         local_norm,      /* contribution of calling rank to L1 norm            */
         reference_norm;
  DTYPE  f_active_points; /* interior of grid with respect to stencil           */
  DTYPE  flops;           /* floating point ops per iteration                   */
  int    iterations;      /* number of times to run the algorithm               */
  double local_stencil_time,/* timing parameters                                */
         stencil_time,
         avgtime;
  int    stencil_size;    /* number of points in stencil                        */
  /* NOTE(review): nthread is declared but never assigned or read here          */
  int    nthread_input,   /* thread parameters                                  */
         nthread;
  DTYPE  * RESTRICT in;   /* input grid values                                  */
  DTYPE  * RESTRICT out;  /* output grid values                                 */
  long   total_length_in; /* total required length to store input array         */
  long   total_length_out;/* total required length to store output array        */
  int    error=0;         /* error flag                                         */
  DTYPE  weight[2*RADIUS+1][2*RADIUS+1]; /* weights of points in the stencil    */
  /* request[0..3] serve the y-direction halo exchange, request[4..7] the
     x-direction exchange; even slots are sends, odd slots are receives         */
  MPI_Request request[8];

  /*******************************************************************************
  ** Initialize the MPI environment
  ********************************************************************************/
  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);
  MPI_Comm_size(MPI_COMM_WORLD, &Num_procs);

  /*******************************************************************************
  ** process, test, and broadcast input parameters
  ** (only root parses argv; results are broadcast below, and bail_out aborts
  ** all ranks if root set the error flag)
  ********************************************************************************/
  if (my_ID == root) {
    printf("Parallel Research Kernels version %s\n", PRKVERSION);
    printf("MPI+OPENMP stencil execution on 2D grid\n");
#ifndef STAR
    printf("ERROR: Compact stencil not supported\n");
    error = 1;
    goto ENDOFTESTS;
#endif

    if (argc != 4){
      printf("Usage: %s <#threads><#iterations> <array dimension> \n",
             *argv);
      error = 1;
      goto ENDOFTESTS;
    }

    /* Take number of threads to request from command line */
    nthread_input = atoi(*++argv);
    if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
      printf("ERROR: Invalid number of threads: %d\n", nthread_input);
      error = 1;
      goto ENDOFTESTS;
    }

    iterations  = atoi(*++argv);
    if (iterations < 1){
      printf("ERROR: iterations must be >= 1 : %d \n",iterations);
      error = 1;
      goto ENDOFTESTS;
    }

    n  = atoi(*++argv);
    nsquare = (long) n * (long) n;
    if (nsquare < Num_procs){
      printf("ERROR: grid size %ld must be at least # ranks: %d\n",
             nsquare, Num_procs);
      error = 1;
      goto ENDOFTESTS;
    }

    if (RADIUS < 0) {
      printf("ERROR: Stencil radius %d should be non-negative\n", RADIUS);
      error = 1;
      goto ENDOFTESTS;
    }

    if (2*RADIUS +1 > n) {
      printf("ERROR: Stencil radius %d exceeds grid size %d\n", RADIUS, n);
      error = 1;
      goto ENDOFTESTS;
    }

    ENDOFTESTS:;
  }
  bail_out(error);

  /* determine best way to create a 2D grid of ranks (closest to square)     */
  factor(Num_procs, &Num_procsx, &Num_procsy);

  /* rank grid is row-major: x varies fastest */
  my_IDx = my_ID%Num_procsx;
  my_IDy = my_ID/Num_procsx;
  /* compute neighbors; don't worry about dropping off the edges of the grid */
  right_nbr  = my_ID+1;
  left_nbr   = my_ID-1;
  top_nbr    = my_ID+Num_procsx;
  bottom_nbr = my_ID-Num_procsx;

  MPI_Bcast(&n,             1, MPI_INT, root, MPI_COMM_WORLD);
  MPI_Bcast(&iterations,    1, MPI_INT, root, MPI_COMM_WORLD);
  MPI_Bcast(&nthread_input, 1, MPI_INT, root, MPI_COMM_WORLD);

  omp_set_num_threads(nthread_input);

  if (my_ID == root) {
    printf("Number of ranks        = %d\n", Num_procs);
    printf("Number of threads      = %d\n", omp_get_max_threads());
    printf("Grid size              = %d\n", n);
    printf("Radius of stencil      = %d\n", RADIUS);
    printf("Tiles in x/y-direction = %d/%d\n", Num_procsx, Num_procsy);
    printf("Type of stencil        = star\n");
#if DOUBLE
    printf("Data type              = double precision\n");
#else
    printf("Data type              = single precision\n");
#endif
#if LOOPGEN
    printf("Script used to expand stencil loop body\n");
#else
    printf("Compact representation of stencil loop body\n");
#endif
    printf("Number of iterations   = %d\n", iterations);
  }

  /* compute amount of space required for input and solution arrays;
     ranks with index below the leftover get one extra column/row            */
  width = n/Num_procsx;
  leftover = n%Num_procsx;
  if (my_IDx<leftover) {
    istart = (width+1) * my_IDx;
    iend = istart + width;
  }
  else {
    istart = (width+1) * leftover + width * (my_IDx-leftover);
    iend = istart + width - 1;
  }

  width = iend - istart + 1;
  if (width == 0) {
    printf("ERROR: rank %d has no work to do\n", my_ID);
    error = 1;
  }
  bail_out(error);

  height = n/Num_procsy;
  leftover = n%Num_procsy;
  if (my_IDy<leftover) {
    jstart = (height+1) * my_IDy;
    jend = jstart + height;
  }
  else {
    jstart = (height+1) * leftover + height * (my_IDy-leftover);
    jend = jstart + height - 1;
  }

  height = jend - jstart + 1;
  if (height == 0) {
    printf("ERROR: rank %d has no work to do\n", my_ID);
    error = 1;
  }
  bail_out(error);

  if (width < RADIUS || height < RADIUS) {
    printf("ERROR: rank %d has work tile smaller then stencil radius\n",
           my_ID);
    error = 1;
  }
  bail_out(error);

  /* NOTE(review): the multiply below is evaluated left-to-right, so
     (width+2*RADIUS)*(height+2*RADIUS) is an int*int product that can
     overflow before the widening by sizeof; the division check right after
     catches the resulting inconsistency — confirm this is the intent        */
  total_length_in = (width+2*RADIUS)*(height+2*RADIUS)*sizeof(DTYPE);
  if (total_length_in/(height+2*RADIUS) != (width+2*RADIUS)*sizeof(DTYPE)) {
    printf("ERROR: Space for %d x %d input array cannot be represented\n",
           width+2*RADIUS, height+2*RADIUS);
    error = 1;
  }
  bail_out(error);

  /* output tile carries no ghost cells, so it is only width x height        */
  total_length_out = width*height*sizeof(DTYPE);

  in  = (DTYPE *) prk_malloc(total_length_in);
  out = (DTYPE *) prk_malloc(total_length_out);
  if (!in || !out) {
    printf("ERROR: rank %d could not allocate space for input/output array\n",
           my_ID);
    error = 1;
  }
  bail_out(error);

  /* fill the stencil weights to reflect a discrete divergence operator      */
  for (jj=-RADIUS; jj<=RADIUS; jj++) for (ii=-RADIUS; ii<=RADIUS; ii++)
    WEIGHT(ii,jj) = (DTYPE) 0.0;
  stencil_size = 4*RADIUS+1;
  for (ii=1; ii<=RADIUS; ii++) {
    WEIGHT(0, ii) = WEIGHT( ii,0) =  (DTYPE) (1.0/(2.0*ii*RADIUS));
    WEIGHT(0,-ii) = WEIGHT(-ii,0) = -(DTYPE) (1.0/(2.0*ii*RADIUS));
  }

  norm = (DTYPE) 0.0;
  f_active_points = (DTYPE) (n-2*RADIUS)*(DTYPE) (n-2*RADIUS);

  /* intialize the input and output arrays                                   */
#pragma omp parallel for private (i)
  for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) {
    IN(i,j)  = COEFX*i+COEFY*j;
    OUT(i,j) = (DTYPE)0.0;
  }

  /* allocate communication buffers for halo values: one allocation holds
     out/in buffers for both the top and bottom exchange                     */
  top_buf_out = (DTYPE *) prk_malloc(4*sizeof(DTYPE)*RADIUS*width);
  if (!top_buf_out) {
    printf("ERROR: Rank %d could not allocated comm buffers for y-direction\n", my_ID);
    error = 1;
  }
  bail_out(error);
  top_buf_in     = top_buf_out +   RADIUS*width;
  bottom_buf_out = top_buf_out + 2*RADIUS*width;
  bottom_buf_in  = top_buf_out + 3*RADIUS*width;

  /* same scheme for the left/right exchange                                 */
  right_buf_out = (DTYPE *) prk_malloc(4*sizeof(DTYPE)*RADIUS*height);
  if (!right_buf_out) {
    printf("ERROR: Rank %d could not allocated comm buffers for x-direction\n", my_ID);
    error = 1;
  }
  bail_out(error);
  right_buf_in  = right_buf_out +   RADIUS*height;
  left_buf_out  = right_buf_out + 2*RADIUS*height;
  left_buf_in   = right_buf_out + 3*RADIUS*height;

  for (iter = 0; iter<=iterations; iter++){

    /* start timer after a warmup iteration (iter 0 is not timed)            */
    if (iter == 1) {
      MPI_Barrier(MPI_COMM_WORLD);
      local_stencil_time = wtime();
    }

    /* need to fetch ghost point data from neighbors in y-direction          */
    if (my_IDy < Num_procsy-1) {
      MPI_Irecv(top_buf_in, RADIUS*width, MPI_DTYPE, top_nbr, 101,
                MPI_COMM_WORLD, &(request[1]));
      for (kk=0,j=jend-RADIUS+1; j<=jend; j++) for (i=istart; i<=iend; i++) {
        top_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(top_buf_out, RADIUS*width,MPI_DTYPE, top_nbr, 99,
                MPI_COMM_WORLD, &(request[0]));
    }
    if (my_IDy > 0) {
      MPI_Irecv(bottom_buf_in,RADIUS*width, MPI_DTYPE, bottom_nbr, 99,
                MPI_COMM_WORLD, &(request[3]));
      for (kk=0,j=jstart; j<=jstart+RADIUS-1; j++) for (i=istart; i<=iend; i++) {
        bottom_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(bottom_buf_out, RADIUS*width,MPI_DTYPE, bottom_nbr, 101,
                MPI_COMM_WORLD, &(request[2]));
    }

    /* complete the y-direction exchange and unpack into the ghost region    */
    if (my_IDy < Num_procsy-1) {
      MPI_Wait(&(request[0]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1]), MPI_STATUS_IGNORE);
      for (kk=0,j=jend+1; j<=jend+RADIUS; j++) for (i=istart; i<=iend; i++) {
        IN(i,j) = top_buf_in[kk++];
      }
    }
    if (my_IDy > 0) {
      MPI_Wait(&(request[2]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3]), MPI_STATUS_IGNORE);
      for (kk=0,j=jstart-RADIUS; j<=jstart-1; j++) for (i=istart; i<=iend; i++) {
        IN(i,j) = bottom_buf_in[kk++];
      }
    }

    /* need to fetch ghost point data from neighbors in x-direction          */
    if (my_IDx < Num_procsx-1) {
      MPI_Irecv(right_buf_in, RADIUS*height, MPI_DTYPE, right_nbr, 1010,
                MPI_COMM_WORLD, &(request[1+4]));
      for (kk=0,j=jstart; j<=jend; j++) for (i=iend-RADIUS+1; i<=iend; i++) {
        right_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(right_buf_out, RADIUS*height, MPI_DTYPE, right_nbr, 990,
                MPI_COMM_WORLD, &(request[0+4]));
    }
    if (my_IDx > 0) {
      MPI_Irecv(left_buf_in, RADIUS*height, MPI_DTYPE, left_nbr, 990,
                MPI_COMM_WORLD, &(request[3+4]));
      for (kk=0,j=jstart; j<=jend; j++) for (i=istart; i<=istart+RADIUS-1; i++) {
        left_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(left_buf_out, RADIUS*height, MPI_DTYPE, left_nbr, 1010,
                MPI_COMM_WORLD, &(request[2+4]));
    }

    /* complete the x-direction exchange and unpack into the ghost region    */
    if (my_IDx < Num_procsx-1) {
      MPI_Wait(&(request[0+4]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1+4]), MPI_STATUS_IGNORE);
      for (kk=0,j=jstart; j<=jend; j++) for (i=iend+1; i<=iend+RADIUS; i++) {
        IN(i,j) = right_buf_in[kk++];
      }
    }
    if (my_IDx > 0) {
      MPI_Wait(&(request[2+4]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3+4]), MPI_STATUS_IGNORE);
      for (kk=0,j=jstart; j<=jend; j++) for (i=istart-RADIUS; i<=istart-1; i++) {
        IN(i,j) = left_buf_in[kk++];
      }
    }

    /* Apply the stencil operator; loop bounds clip the tile to the global
       interior so the stencil never reaches outside the global grid         */
#pragma omp parallel for private (i, j, ii, jj)
    for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {
      for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {
#if LOOPGEN
#include "loop_body_star.incl"
#else
        for (jj=-RADIUS; jj<=RADIUS; jj++) OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj);
        for (ii=-RADIUS; ii<0; ii++)       OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
        for (ii=1; ii<=RADIUS; ii++)       OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
#endif
      }
    }

#pragma omp parallel for private (i)
    /* add constant to solution to force refresh of neighbor data, if any    */
    for (j=jstart; j<=jend; j++) for (i=istart; i<=iend; i++) IN(i,j)+= 1.0;
  }

  local_stencil_time = wtime() - local_stencil_time;
  MPI_Reduce(&local_stencil_time, &stencil_time, 1, MPI_DOUBLE, MPI_MAX, root,
             MPI_COMM_WORLD);

  /* compute L1 norm in parallel                                             */
  local_norm = (DTYPE) 0.0;
#pragma omp parallel for reduction(+:local_norm) private (i)
  for (j=MAX(jstart,RADIUS); j<=MIN(n-RADIUS-1,jend); j++) {
    for (i=MAX(istart,RADIUS); i<=MIN(n-RADIUS-1,iend); i++) {
      local_norm += (DTYPE)ABS(OUT(i,j));
    }
  }

  MPI_Reduce(&local_norm, &norm, 1, MPI_DTYPE, MPI_SUM, root, MPI_COMM_WORLD);

  /*******************************************************************************
  ** Analyze and output results.
  ********************************************************************************/

  /* verify correctness                                                      */
  if (my_ID == root) {
    norm /= f_active_points;
    if (RADIUS > 0) {
      reference_norm = (DTYPE) (iterations+1) * (COEFX + COEFY);
    }
    else {
      reference_norm = (DTYPE) 0.0;
    }
    if (ABS(norm-reference_norm) > EPSILON) {
      printf("ERROR: L1 norm = "FSTR", Reference L1 norm = "FSTR"\n",
             norm, reference_norm);
      error = 1;
    }
    else {
      printf("Solution validates\n");
#if VERBOSE
      printf("Reference L1 norm = "FSTR", L1 norm = "FSTR"\n",
             reference_norm, norm);
#endif
    }
  }
  bail_out(error);

  if (my_ID == root) {
    /* flops/stencil: 2 flops (fma) for each point in the stencil,
       plus one flop for the update of the input of the array                */
    flops = (DTYPE) (2*stencil_size+1) * f_active_points;
    avgtime = stencil_time/iterations;
    printf("Rate (MFlops/s): "FSTR"  Avg time (s): %lf\n",
           1.0E-06 * flops/avgtime, avgtime);
  }

  MPI_Finalize();
  exit(EXIT_SUCCESS);
}
|
GB_unop__bnot_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_uint32_uint32)
// op(A') function: GB (_unop_tran__bnot_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__bnot_uint32_uint32)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply the bitwise-complement operator entrywise: Cx [p] = ~ Ax [p].
    // Both types are uint32_t, so the cast step of the generated kernel is
    // the identity and is folded away here.
    int64_t p ;
    if (Ab == NULL)
    {
        // full or sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = ~(Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged in Ab carry an entry.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = ~(Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__bnot_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace row/position counters
    const int64_t *restrict A_slice, // partition of A's vectors over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template expands in terms of the GB_* macros
    // defined above (here: uint32_t in/out, cij = ~(aij)).
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__minv_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_uint64
// op(A') function: GB_tran__minv_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint16_uint64
(
    uint16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Entrywise: read a uint64_t entry of A, narrow it to uint16_t, and
    // store its modular multiplicative inverse in C.  This is the manual
    // expansion of GB_CAST_OP (p, p) from the macros defined above.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        uint16_t x = (uint16_t) aij ;
        Cx [p] = GB_IMINV_UNSIGNED (x, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,    // per-slice row counts from phase 1
    GBI_single_iterator Iter,        // iterator over the vectors of A
    const int64_t *restrict A_slice, // partition of A over the slices
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    // The shared transpose template expands using the GB_* macros defined
    // above (uint64_t -> uint16_t cast, then GB_IMINV_UNSIGNED).
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vbHmmPcFret.c | /*
* vbHmmPcFret.c
* Model-specific core functions for VB-HMM-PC-FRET.
*
* Created by OKAMOTO Kenji, SAKO Yasushi and RIKEN
* Copyright 2011-2015
* Cellular Informatics Laboratory, Advance Science Institute, RIKEN, Japan.
* All rights reserved.
*
* Ver. 1.1.0
* Last modified on 2015.09.17
*/
#include "vbHmmPcFret.h"
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_sf_psi.h>
#include <string.h>
#include "rand.h"
#ifdef _OPENMP
#include "omp.h"
#endif
//#define DEBUG
//// Uncomment one/both of the following defenitions to activate constraint on I and K.
//#define INTENSITY_CAP
#ifdef INTENSITY_CAP
#define maxIntensityRatio 10.0
#endif
#define MAX(a,b) ((a)>(b)?(a):(b))
#define MIN(a,b) ((a)<(b)?(a):(b))
//static int isGlobalAnalysis = 0;
/*
 * Registers the PC-FRET model-specific callbacks with the generic VB-HMM
 * driver.  The driver invokes these through the commonFunctions table.
 */
void setFunctions_pcFret(){
    commonFunctions funcs;

    /* allocation / deallocation of model parameters and statistics */
    funcs.newModelParameters  = newModelParameters_pcFret;
    funcs.freeModelParameters = freeModelParameters_pcFret;
    funcs.newModelStats       = newModelStats_pcFret;
    funcs.freeModelStats      = freeModelStats_pcFret;

    /* initialization and the E-step probability kernels */
    funcs.initializeVbHmm = initializeVbHmm_pcFret;
    funcs.pTilde_z1       = pTilde_z1_pcFret;
    funcs.pTilde_zn_zn1   = pTilde_zn_zn1_pcFret;
    funcs.pTilde_xn_zn    = pTilde_xn_zn_pcFret;

    /* M-step, lower bound, post-processing and output */
    funcs.calcStatsVars     = calcStatsVars_pcFret;
    funcs.maximization      = maximization_pcFret;
    funcs.varLowerBound     = varLowerBound_pcFret;
    funcs.reorderParameters = reorderParameters_pcFret;
    funcs.outputResults     = outputResults_pcFret;

    setFunctions( funcs );
}
//void setGFunctions_pcFret(){
// gCommonFunctions funcs;
// funcs.newModelParameters = newModelParameters_pcFret;
// funcs.freeModelParameters = freeModelParameters_pcFret;
// funcs.newModelStats = newModelStats_pcFret;
// funcs.freeModelStats = freeModelStats_pcFret;
// funcs.newModelStatsG = newModelStatsG_pcFret;
// funcs.freeModelStatsG = freeModelStatsG_pcFret;
// funcs.initializeVbHmmG = initializeVbHmmG_pcFret;
// funcs.pTilde_z1 = pTilde_z1_pcFret;
// funcs.pTilde_zn_zn1 = pTilde_zn_zn1_pcFret;
// funcs.pTilde_xn_zn = pTilde_xn_zn_pcFret;
// funcs.calcStatsVarsG = calcStatsVarsG_pcFret;
// funcs.maximizationG = maximizationG_pcFret;
// funcs.varLowerBoundG = varLowerBoundG_pcFret;
// funcs.reorderParametersG = reorderParametersG_pcFret;
// funcs.outputResultsG = outputResultsG_pcFret;
// setGFunctions( funcs );
// isGlobalAnalysis = 1;
//}
/*
 * Thin dispatcher for the commonFunctions table: forwards result output
 * for the PC-FRET model to outputPcFretResults().
 */
void outputResults_pcFret( xnDataSet *xn, globalVars *gv, indVars *iv, FILE *logFP )
{
    outputPcFretResults( xn, gv, iv, logFP );
}
//void outputResultsG_pcFret( xns, gv, ivs, logFP )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//FILE *logFP;
//{
// outputPcFretResultsG( xns, gv, ivs, logFP );
//}
/*
 * Allocates a pcFretParameters structure for an sNo-state model.
 * xn is unused here but kept for the common callback signature.
 * Returns the new structure as void* (ownership passes to the caller;
 * release with freeModelParameters_pcFret).
 */
void *newModelParameters_pcFret( xnDataSet *xn, int sNo )
{
    pcFretParameters *p = (pcFretParameters *)malloc( sizeof(pcFretParameters) );
    int i;

    /* prior hyperparameters: initial state (Pi) and transitions (A) */
    p->uPiArr   = (double *)malloc( sNo * sizeof(double) );
    p->sumUPi   = 0.0;
    p->uAMat    = (double **)malloc( sNo * sizeof(double *) );
    p->sumUAArr = (double *)malloc( sNo * sizeof(double) );

    /* prior hyperparameters: intensity (aI, bI) and FRET efficiency (uE, vE) */
    p->aIArr = (double *)malloc( sNo * sizeof(double) );
    p->bIArr = (double *)malloc( sNo * sizeof(double) );
    p->uEArr = (double *)malloc( sNo * sizeof(double) );
    p->vEArr = (double *)malloc( sNo * sizeof(double) );

    /* posterior expectations and log-expectations */
    p->avgPi   = (double *)malloc( sNo * sizeof(double) );
    p->avgLnPi = (double *)malloc( sNo * sizeof(double) );
    p->avgA    = (double **)malloc( sNo * sizeof(double *) );
    p->avgLnA  = (double **)malloc( sNo * sizeof(double *) );
    p->avgI    = (double *)malloc( sNo * sizeof(double) );
    p->avgLnI  = (double *)malloc( sNo * sizeof(double) );
    p->avgE    = (double *)malloc( sNo * sizeof(double) );
    p->avgLnE  = (double **)malloc( sNo * sizeof(double *) );

    /* second dimension of the matrix-valued members */
    for( i = 0 ; i < sNo ; i++ ){
        p->uAMat[i]  = (double *)malloc( sNo * sizeof(double) );
        p->avgA[i]   = (double *)malloc( sNo * sizeof(double) );
        p->avgLnA[i] = (double *)malloc( sNo * sizeof(double) );
        /* avgLnE holds E[ln E] and E[ln (1-E)] per state */
        p->avgLnE[i] = (double *)malloc( 2 * sizeof(double) );
    }

    return p;
}
/**
 * Releases every array allocated by newModelParameters_pcFret() for an
 * sNo-state model, then the record itself, and clears the caller's
 * pointer so it cannot dangle.
 */
void freeModelParameters_pcFret( p, xn, sNo )
void **p;
xnDataSet *xn;
int sNo;
{
    pcFretParameters *params = *p;
    int k;
    /* Row arrays of all two-dimensional members first. */
    for( k = 0 ; k < sNo ; k++ ){
        free( params->uAMat[k] );
        free( params->avgA[k] );
        free( params->avgLnA[k] );
        free( params->avgLnE[k] );
    }
    free( params->uAMat );
    free( params->avgA );
    free( params->avgLnA );
    free( params->avgLnE );
    /* One-dimensional members. */
    free( params->uPiArr );
    free( params->sumUAArr );
    free( params->aIArr );
    free( params->bIArr );
    free( params->uEArr );
    free( params->vEArr );
    free( params->avgPi );
    free( params->avgLnPi );
    free( params->avgI );
    free( params->avgLnI );
    free( params->avgE );
    free( *p );
    *p = NULL;
}
/**
 * Allocates the sufficient-statistics record for an sNo-state pcFRET
 * model. Contents are left uninitialized; calcStatsVars_pcFret() fills
 * them on every E-step.
 * @return  newly malloc'd pcFretStats, released by freeModelStats_pcFret().
 */
void *newModelStats_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    int sNo = gv->sNo;
    pcFretStats *stats = (pcFretStats*)malloc( sizeof(pcFretStats) );
    int k;
    stats->Ni = (double *)malloc( sNo * sizeof(double) );
    stats->Ci = (double *)malloc( sNo * sizeof(double) );
    stats->Di = (double *)malloc( sNo * sizeof(double) );
    stats->Ai = (double *)malloc( sNo * sizeof(double) );
    stats->Mi = (double *)malloc( sNo * sizeof(double) );
    /* Nij is an sNo x sNo transition-count matrix. */
    stats->Nij = (double **)malloc( sNo * sizeof(double*) );
    for( k = 0 ; k < sNo ; k++ ){
        stats->Nij[k] = (double *)malloc( sNo * sizeof(double) );
    }
    return stats;
}
/**
 * Releases a statistics record created by newModelStats_pcFret() and
 * clears the caller's pointer.
 */
void freeModelStats_pcFret( s, xn, gv, iv )
void **s;
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretStats *stats = *s;
    int sNo = gv->sNo;
    int k;
    for( k = 0 ; k < sNo ; k++ ){
        free( stats->Nij[k] );
    }
    free( stats->Nij );
    free( stats->Mi );
    free( stats->Ai );
    free( stats->Di );
    free( stats->Ci );
    free( stats->Ni );
    free( stats );
    *s = NULL;
}
//void *newModelStatsG_pcFret( xns, gv, ivs)
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
// int sNo = gv->sNo;
// pcFretGlobalStats *gs = (pcFretGlobalStats*)malloc( sizeof(pcFretGlobalStats) );
//
// return gs;
//}
//void freeModelStatsG_pcFret( gs, xns, gv, ivs )
//void **gs;
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
// int sNo = gv->sNo;
// pcFretGlobalStats *ggs = *gs;
//
// free( *gs );
// *gs = NULL;
//}
/**
 * Prepares a variational-Bayes run for one pcFRET trace:
 *  - sets the prior hyper-parameters (uniform Dirichlet over pi, sticky
 *    Dirichlet over A with weight 100 on self-transitions, Gamma over
 *    the intensity scaled by the mean observed count rate, uniform Beta
 *    over the FRET efficiency),
 *  - randomizes the responsibilities, and
 *  - runs one stats/maximization pass so all posterior averages are
 *    defined before the first iteration.
 * Fix: the photon-count accumulation loop now uses a size_t index;
 * xn->N is a size_t, so the previous int counter could overflow
 * (undefined behavior) for traces longer than INT_MAX frames.
 */
void initializeVbHmm_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretData *d = xn->data;
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    pcFretParameters *p = gv->params;
    int i, j;
    size_t n;
    size_t totalC = 0;
    for( n = 0 ; n < dLen ; n++ ){
        totalC += d->dCounts[n] + d->aCounts[n];
    }
    /* Mean photons per bin; scales the intensity prior below.
       NOTE(review): dLen == 0 would divide by zero -- presumably the
       loader guarantees a non-empty trace; confirm upstream. */
    double meanI = (double)totalC / (double)dLen;
    // hyper parameter for p( pi(i) )
    p->sumUPi = 0.0;
    for( i = 0 ; i < sNo ; i++ ){
        p->uPiArr[i] = 1.0;
        p->sumUPi += p->uPiArr[i];
    }
    // hyper parameter for p( A(i,j) ): strong self-transition prior
    for( i = 0 ; i < sNo ; i++ ){
        p->sumUAArr[i] = 0.0;
        for( j = 0 ; j < sNo ; j++ ){
            if( j == i ){
                p->uAMat[i][j] = 100.0;
            } else {
                p->uAMat[i][j] = 1.0;
            }
            p->sumUAArr[i] += p->uAMat[i][j];
        }
    }
    // hyper parameter for p( I(k) )
    for( i = 0 ; i < sNo ; i++ ){
        p->aIArr[i] = 1.0;
        p->bIArr[i] = 1.0 / meanI;
    }
    // hyper parameter for p( E(i) )
    for( i = 0 ; i < sNo ; i++ ){
        p->uEArr[i] = 1.0;
        p->vEArr[i] = 1.0;
    }
    initialize_indVars_pcFret( xn, gv, iv );
    calcStatsVars_pcFret( xn, gv, iv );
    maximization_pcFret( xn, gv, iv );
}
//void initializeVbHmmG_pcFret( xns, gv, ivs )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
//}
/**
 * Seeds the responsibility matrix gmMat with random rows: each frame
 * gets sNo random weights (enoise(1.0) + 1.0) that are then normalized
 * so every row sums to one.
 */
void initialize_indVars_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat;
    size_t frame;
    int state;
    double rowSum;
    for( frame = 0 ; frame < dLen ; frame++ ){
        rowSum = 0.0;
        for( state = 0 ; state < sNo ; state++ ){
            gmMat[frame][state] = enoise(1.0) + 1.0;
            rowSum += gmMat[frame][state];
        }
        for( state = 0 ; state < sNo ; state++ )
        {   gmMat[frame][state] /= rowSum;   }
    }
}
xnDataSet *newXnDataSet_pcFret( filename )
const char *filename;
{
xnDataSet *xn = (xnDataSet*)malloc( sizeof(xnDataSet) );
xn->name = (char*)malloc( strlen(filename) + 2 );
strncpy( xn->name, filename, strlen(filename)+1 );
xn->data = (pcFretData*)malloc( sizeof(pcFretData) );
pcFretData *d = (pcFretData*)xn->data;
d->binSize = 0.0;
d->dCounts = NULL;
d->aCounts = NULL;
return xn;
}
/**
 * Frees the trace data, its name string and the container itself, then
 * nulls the caller's handle.
 */
void freeXnDataSet_pcFret( xn )
xnDataSet **xn;
{
    xnDataSet *set = *xn;
    pcFretData *trace = (pcFretData*)set->data;
    free( trace->dCounts );
    free( trace->aCounts );
    free( set->data );
    free( set->name );
    free( set );
    *xn = NULL;
}
/**
 * Unnormalized initial-state weight for state i: exp(<ln pi_i>).
 */
double pTilde_z1_pcFret( i, params )
int i;
void *params;
{
    pcFretParameters *model = (pcFretParameters*)params;
    return exp( model->avgLnPi[i] );
}
/**
 * Unnormalized transition weight from state i to state j:
 * exp(<ln A_ij>).
 */
double pTilde_zn_zn1_pcFret( i, j, params )
int i, j;
void *params;
{
    pcFretParameters *model = (pcFretParameters*)params;
    return exp( model->avgLnA[i][j] );
}
/**
 * Unnormalized emission probability of frame n under state i: the
 * product of Poisson-style terms for the donor and acceptor photon
 * counts, built from the posterior expectations <ln(1-E)>, <ln E>,
 * <ln I> and <I>, divided by the count factorials.
 * NOTE(review): gsl_sf_fact() returns a double and overflows for
 * arguments > 170 -- assumes per-bin photon counts stay well below
 * that; confirm against typical bin sizes.
 */
double pTilde_xn_zn_pcFret( xn, n, i, params )
xnDataSet *xn;
size_t n;
int i;
void *params;
{
    pcFretParameters *p = (pcFretParameters*)params;
    pcFretData *d = (pcFretData*)xn->data;
    return exp( d->dCounts[n]*p->avgLnE[i][0] + d->aCounts[n]*p->avgLnE[i][1] + (d->dCounts[n]+d->aCounts[n])*p->avgLnI[i] - p->avgI[i] ) / gsl_sf_fact( d->dCounts[n] ) / gsl_sf_fact( d->aCounts[n] );
}
/**
 * E-step bookkeeping: accumulates the sufficient statistics used by
 * maximization_pcFret() from the current responsibilities.
 *   Ni  - expected number of frames spent in state i
 *   Di  - expected donor photon count in state i
 *   Ai  - expected acceptor photon count in state i
 *   Ci  - total expected photon count, Di + Ai
 *   Mi  - expected number of transitions out of state i
 *   Nij - expected number of i -> j transitions
 * Accumulators start at 1e-10 so later divisions and digamma/lngamma
 * calls never see an exact zero. (The 1e-10 seed of Ci is redundant:
 * Ci is overwritten with Di + Ai at the end of each iteration.)
 */
void calcStatsVars_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretData *d = (pcFretData*)xn->data;
    pcFretStats *s = (pcFretStats*)iv->stats;
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat, ***xiMat = iv->xiMat;
    double *Ni = s->Ni, *Ci = s->Ci, *Di = s->Di, *Ai = s->Ai;
    double *Mi = s->Mi, **Nij = s->Nij;
    size_t n;
    int i, j;
    for( i = 0 ; i < sNo ; i++ ){
        Ni[i] = 1e-10;
        Ci[i] = 1e-10;
        Di[i] = 1e-10;
        Ai[i] = 1e-10;
        Mi[i] = 1e-10;
        for( j = 0 ; j < sNo ; j++ ){
            Nij[i][j] = 1e-10;
        }
        /* Accumulate over all frames: gmMat carries the single-state
           responsibilities, xiMat the pairwise transition ones. */
        for( n = 0 ; n < dLen ; n++ ){
            Ni[i] += gmMat[n][i];
            Di[i] += gmMat[n][i] * (double)d->dCounts[n];
            Ai[i] += gmMat[n][i] * (double)d->aCounts[n];
            for( j = 0 ; j < sNo ; j++ ){
                Mi[i] += xiMat[n][i][j];
                Nij[i][j] += xiMat[n][i][j];
            }
        }
        Ci[i] = Di[i] + Ai[i];
    }
//#ifdef DEBUG
//#pragma omp critical
//{
//    for( n = 0 ; n < 20 ; n++ ){
//        for( i = 0 ; i < sNo ; i++ ){
//            fprintf(logFP, "%g,", gmMat[n][i]);
//        }
//        fprintf(logFP, "; ");
//    }
//    fprintf(logFP, "\n");
//    for( i = 0 ; i < sNo ; i++ ){
//        fprintf(logFP, "Ni(%d)=%g,  ", i, Ni[i]);
//        fprintf(logFP, "Ti(%d)=%g,  ", i, Ti[i]);
//        fprintf(logFP, "Mi(%d)=%g,  ", i, Mi[i]);
//        for( j = 0 ; j < sNo ; j++ ){
//            if( j != i )
//                fprintf(logFP, "Nij(%d,%d)=%g, ", i, j, Nij[i][j]);
//        }
//        fprintf(logFP, "\n");
//    }
//}
//#endif
}
//void calcStatsVarsG_pcFret( xns, gv, ivs )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
//}
/**
 * M-step: refreshes the posterior expectations from the sufficient
 * statistics gathered by calcStatsVars_pcFret().
 *  - avgPi/avgLnPi : Dirichlet posterior over the initial state
 *  - avgA/avgLnA   : Dirichlet posterior over each transition row
 *  - avgI/avgLnI   : Gamma posterior over the per-state intensity
 *  - avgE, avgLnE  : Beta posterior over the FRET efficiency, with
 *                    avgLnE[i][0] = <ln(1-E)> (donor) and
 *                    avgLnE[i][1] = <ln E>    (acceptor)
 * gsl_sf_psi is the digamma function.
 * NOTE(review): the INTENSITY_CAP branch references xnWv and
 * maxIntensityRatio, which are not declared in this file -- presumably
 * globals defined elsewhere in the project; verify before enabling
 * that macro.
 */
void maximization_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretParameters *p = (pcFretParameters*)gv->params;
    pcFretStats *s = (pcFretStats*)iv->stats;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat;
    double *uPiArr = p->uPiArr, sumUPi = p->sumUPi, *aIArr = p->aIArr, *bIArr = p->bIArr;
    double **uAMat = p->uAMat, *sumUAArr = p->sumUAArr;
    double *uEArr = p->uEArr, *vEArr = p->vEArr;
    double *avgPi = p->avgPi, *avgLnPi = p->avgLnPi, **avgA = p->avgA, **avgLnA = p->avgLnA;
    double *avgE = p->avgE, **avgLnE = p->avgLnE;
    double *avgI = p->avgI, *avgLnI = p->avgLnI;
    double *Ni = s->Ni, *Ci = s->Ci, *Di = s->Di, *Ai = s->Ai;
    double *Mi = s->Mi, **Nij = s->Nij;
    int i, j;
    for( i = 0 ; i < sNo ; i++ ){
        /* Initial-state posterior: prior plus frame-0 responsibility. */
        avgPi[i] = ( uPiArr[i] + gmMat[0][i] ) / ( sumUPi + 1.0 );
        avgLnPi[i] = gsl_sf_psi( uPiArr[i] + gmMat[0][i] ) - gsl_sf_psi( sumUPi + 1.0 );
        /* Transition-row posterior: prior plus expected counts. */
        for( j = 0 ; j < sNo ; j++ ){
            avgA[i][j] = ( uAMat[i][j] + Nij[i][j] ) / ( sumUAArr[i] + Mi[i] );
            avgLnA[i][j] = gsl_sf_psi( uAMat[i][j] + Nij[i][j] ) - gsl_sf_psi( sumUAArr[i] + Mi[i] );
        }
        /* Intensity posterior (Gamma with shape Ci+a, rate Ni+b). */
        avgI[i] = (Ci[i] + aIArr[i]) / (Ni[i] + bIArr[i]);
        avgLnI[i] = gsl_sf_psi( Ci[i] + aIArr[i] ) - log( Ni[i] + bIArr[i] );
#ifdef INTENSITY_CAP
        size_t n, totalC = 0;
        pcFretData *pc = xnWv->data;
        for( n = 0 ; n < xnWv->N ; n++ ){
            totalC += pc->dCounts[n] + pc->aCounts[n];
        }
        double meanI = (double)totalC / (double)xnWv->N;
        avgI[i] = MIN( avgI[i], maxIntensityRatio * meanI );
        avgLnI[i] = MIN( avgLnI[i], log(maxIntensityRatio * meanI) );
#endif
        /* FRET-efficiency posterior (Beta with counts Ai vs Di). */
        avgE[i] = ( uEArr[i] + Ai[i] ) / ( uEArr[i] + vEArr[i] + Ci[i] );
        // ln(1-E) for donor
        avgLnE[i][0] = gsl_sf_psi( vEArr[i] + Di[i] ) - gsl_sf_psi( uEArr[i] + vEArr[i] + Ci[i] );
        // ln(E) for acceptor
        avgLnE[i][1] = gsl_sf_psi( uEArr[i] + Ai[i] ) - gsl_sf_psi( uEArr[i] + vEArr[i] + Ci[i] );
    }
}
//void maximizationG_pc( xns, gv, ivs )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
//}
/**
 * Computes the variational lower bound of the evidence for the current
 * posterior: F = <ln p(priors)> - <ln q(posteriors)> + ln p(x), where
 * the gsl_sf_lngamma / gsl_sf_psi terms are the standard Dirichlet
 * (pi, A rows), Gamma (intensity) and Beta (efficiency) pieces, lnpX
 * accumulates the forward-pass scaling factors cn[n], and the final
 * log(sNo!) term corrects for label-switching degeneracy.
 * @return  the lower bound value (used as the convergence criterion)
 */
double varLowerBound_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretParameters *p = (pcFretParameters*)gv->params;
    pcFretStats *s = (pcFretStats*)iv->stats;
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat, *cn = iv->cn;
    double *uPiArr = p->uPiArr, sumUPi = p->sumUPi, *aIArr = p->aIArr, *bIArr = p->bIArr;
    double **uAMat = p->uAMat, *sumUAArr = p->sumUAArr;
    double *uEArr = p->uEArr, *vEArr = p->vEArr;
    double *avgLnPi = p->avgLnPi, **avgLnA = p->avgLnA;
    double *avgI = p->avgI, *avgLnI = p->avgLnI, **avgLnE = p->avgLnE;
    double *Ni = s->Ni, *Ci = s->Ci, *Di = s->Di, *Ai = s->Ai;
    double *Mi = s->Mi, **Nij = s->Nij;
    size_t n;
    int i, j;
    /* lnp* terms: expected log-priors. lnq* terms: entropies of the
       variational posteriors. Each pair forms a (negative) KL term. */
    double lnpPi = gsl_sf_lngamma(sumUPi);
    double lnpA = 0.0;
    double lnpI = 0.0;
    double lnpE = 0.0;
    double lnqPi = gsl_sf_lngamma(sumUPi + 1.0);
    double lnqA = 0.0;
    double lnqI = 0.0;
    double lnqE = 0.0;
    for( i = 0 ; i < sNo ; i++ ){
        lnpPi += (uPiArr[i]-1.0) * avgLnPi[i] - gsl_sf_lngamma(uPiArr[i]);
        lnpI += aIArr[i] * log(bIArr[i]) - gsl_sf_lngamma(aIArr[i]);
        lnpI += (aIArr[i] - 1.0) * avgLnI[i] - bIArr[i] * avgI[i];
        lnpE += gsl_sf_lngamma(uEArr[i]+vEArr[i]) - gsl_sf_lngamma(uEArr[i]);
        lnpE += -gsl_sf_lngamma(vEArr[i]);
        lnpE += (uEArr[i]-1.0)*avgLnE[i][1] + (vEArr[i]-1.0)*avgLnE[i][0];
        lnqPi += (uPiArr[i]+gmMat[0][i]-1.0) * (gsl_sf_psi(uPiArr[i]+gmMat[0][i]) - gsl_sf_psi(sumUPi+1.0));
        lnqPi -= gsl_sf_lngamma(uPiArr[i] + gmMat[0][i]);
        lnqI += (Ci[i] + aIArr[i]) * log(Ni[i] + bIArr[i]) - gsl_sf_lngamma(Ci[i] + aIArr[i]);
        lnqI += (Ci[i] + aIArr[i] - 1.0) * avgLnI[i] - (Ni[i] + bIArr[i]) * avgI[i];
        lnqE += gsl_sf_lngamma(uEArr[i] + vEArr[i] + Ci[i]);
        lnqE -= gsl_sf_lngamma(uEArr[i] + Ai[i]);
        lnqE -= gsl_sf_lngamma(vEArr[i] + Di[i]);
        lnqE += (uEArr[i] + Ai[i] - 1.0) * avgLnE[i][1];
        lnqE += (vEArr[i] + Di[i] - 1.0) * avgLnE[i][0];
        lnpA += gsl_sf_lngamma(sumUAArr[i]);
        lnqA += gsl_sf_lngamma(sumUAArr[i] + Mi[i]);
        for( j = 0 ; j < sNo ; j++ ){
            lnpA += (uAMat[i][j]-1.0)*avgLnA[i][j] - gsl_sf_lngamma(uAMat[i][j]);
            lnqA += (uAMat[i][j] + Nij[i][j] - 1.0) * (gsl_sf_psi(uAMat[i][j]+Nij[i][j]) - gsl_sf_psi(sumUAArr[i]+Mi[i]));
            lnqA -= gsl_sf_lngamma( uAMat[i][j] + Nij[i][j] );
        }
    }
    /* Log-likelihood term from the forward-pass scaling factors. */
    double lnpX = 0.0;
    for( n = 0 ; n < dLen ; n++ ){
        lnpX += log( cn[n] );
    }
    double val;
    val  = lnpPi + lnpA + lnpI + lnpE;
    val -= lnqPi + lnqA + lnqI + lnqE;
    val += lnpX;
    val += log(gsl_sf_fact(sNo));
//#ifdef DEBUG
//#pragma omp critical
//{
//    FILE *logFP = stderr;
//    if( val > 100000 ){
//        fprintf(logFP, "  > %g; %g; %g; %g;", lnpPi, lnpA, lnpI, lnpE);
//        fprintf(logFP, "  %g; %g; %g; %g; %g\n", lnqPi, lnqA, lnqI, lnqE, lnpX);
//    }
//}
//#endif
    return val;
}
//double varLowerBoundG_pcFret( xns, gv, ivs )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
//}
/**
 * Relabels the hidden states so they are sorted by decreasing FRET
 * efficiency (index 0 = largest avgE) and applies the same permutation
 * to every per-state quantity: priors, transition matrices,
 * intensities, efficiencies, sufficient statistics, and the per-frame
 * responsibilities gmMat / xiMat. Ties are broken by the original
 * state order so the permutation is well defined.
 */
void reorderParameters_pcFret( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    pcFretParameters *p = (pcFretParameters*)gv->params;
    pcFretStats *s = (pcFretStats*)iv->stats;
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat, ***xiMat = iv->xiMat;
    double *avgPi = p->avgPi, *avgLnPi = p->avgLnPi, **avgA = p->avgA;
    double **avgLnA = p->avgLnA;
    double *avgI = p->avgI, *avgLnI = p->avgLnI, *avgE = p->avgE, **avgLnE = p->avgLnE;
    double *Ni = s->Ni, *Ci = s->Ci, *Di = s->Di, *Ai = s->Ai;
    size_t n;
    int i, j;
    /* Scratch buffers: store for 1-D permutes, s2D for 2-D permutes.
       s2D rows are MAX(sNo,2) wide so they can also hold the 2-column
       avgLnE matrix. */
    int *index = (int*)malloc( sNo * sizeof(int) );
    double *store = (double*)malloc( sNo * sizeof(double) );
    double **s2D = (double**)malloc( sNo * sizeof(double*) );
    for( i = 0 ; i < sNo ; i++ )
    {   s2D[i] = (double*)malloc( MAX(sNo,2) * sizeof(double) );   }
    // index indicates order of avgE values (0=biggest avgE -- sNo=smallest avgE).
    for( i = 0 ; i < sNo ; i++ ){
        index[i] = sNo - 1;
        for( j = 0 ; j < sNo ; j++ ){
            if( j != i ){
                if( avgE[i] < avgE[j] ){
                    index[i]--;
                } else if( avgE[i] == avgE[j] ){
                    /* Tie-break: the earlier state keeps the lower index. */
                    if( j > i )
                    {   index[i]--;   }
                }
            }
        }
    }
    /* Permute every per-state vector through the scratch buffer. */
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = avgPi[i]; }
    for( i = 0 ; i < sNo ; i++ ){ avgPi[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = avgLnPi[i]; }
    for( i = 0 ; i < sNo ; i++ ){ avgLnPi[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = avgI[i]; }
    for( i = 0 ; i < sNo ; i++ ){ avgI[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = avgLnI[i]; }
    for( i = 0 ; i < sNo ; i++ ){ avgLnI[i] = store[i]; }
    /* Transition matrices permute on both rows and columns. */
    for( j = 0 ; j < sNo ; j++ ){
        for( i = 0 ; i < sNo ; i++ ){ s2D[index[i]][index[j]] = avgA[i][j]; }
    }
    for( j = 0 ; j < sNo ; j++ ){
        for( i = 0 ; i < sNo ; i++ ){ avgA[i][j] = s2D[i][j]; }
    }
    for( j = 0 ; j < sNo ; j++ ){
        for( i = 0 ; i < sNo ; i++ ){ s2D[index[i]][index[j]] = avgLnA[i][j]; }
    }
    for( j = 0 ; j < sNo ; j++ ){
        for( i = 0 ; i < sNo ; i++ ){ avgLnA[i][j] = s2D[i][j]; }
    }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = avgE[i]; }
    for( i = 0 ; i < sNo ; i++ ){ avgE[i] = store[i]; }
    /* avgLnE permutes on rows only; its 2 columns are donor/acceptor. */
    for( i = 0 ; i < sNo ; i++ )
    {   s2D[index[i]][0] = avgLnE[i][0];
        s2D[index[i]][1] = avgLnE[i][1];   }
    for( i = 0 ; i < sNo ; i++ )
    {   avgLnE[i][0] = s2D[i][0];
        avgLnE[i][1] = s2D[i][1];   }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = Ni[i]; }
    for( i = 0 ; i < sNo ; i++ ){ Ni[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = Ci[i]; }
    for( i = 0 ; i < sNo ; i++ ){ Ci[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = Di[i]; }
    for( i = 0 ; i < sNo ; i++ ){ Di[i] = store[i]; }
    for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = Ai[i]; }
    for( i = 0 ; i < sNo ; i++ ){ Ai[i] = store[i]; }
    /* Per-frame responsibilities follow the same permutation. */
    for( n = 0 ; n < dLen ; n++ ){
        for( i = 0 ; i < sNo ; i++ ){ store[index[i]] = gmMat[n][i]; }
        for( i = 0 ; i < sNo ; i++ ){ gmMat[n][i] = store[i]; }
    }
    for( n = 0 ; n < dLen ; n++ ){
        for( j = 0 ; j < sNo ; j++ ){
            for( i = 0 ; i < sNo ; i++ ){ s2D[index[i]][index[j]] = xiMat[n][i][j]; }
        }
        for( j = 0 ; j < sNo ; j++ ){
            for( i = 0 ; i < sNo ; i++ ){ xiMat[n][i][j] = s2D[i][j]; }
        }
    }
    for( i = 0 ; i < sNo ; i++ ){ free( s2D[i] ); }
    free( s2D );
    free( store );
    free( index );
}
//void reorderParametersG_pc( xns, gv, ivs )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//{
//}
/**
 * Writes the final results of one pcFRET analysis:
 *  - a human-readable summary (intensities, efficiencies, pi, A) to logFP,
 *  - "<name>.paramNNN": one row per state with I, E, pi and an A column,
 *  - "<name>.LqNNN":    the lower-bound value of every iteration,
 *  - "<name>.maxSNNN":  the most probable state trajectory.
 * Fix: the output file name is now built in a heap buffer sized from
 * xn->name; the previous fixed char fn[256] could be overflowed by
 * sprintf() for long input paths.
 */
void outputPcFretResults( xn, gv, iv, logFP )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
FILE *logFP;
{
    pcFretParameters *p = (pcFretParameters*)gv->params;
    int sNo = gv->sNo;
    int i, j;
    fprintf(logFP, " results: K = %d \n", sNo);
    fprintf(logFP, " intensities: ( %g", p->avgI[0]);
    for( i = 1 ; i < sNo ; i++ )
    {   fprintf(logFP, ", %g", p->avgI[i]);   }
    fprintf(logFP, " ) \n");
    fprintf(logFP, " FRET efficiencies: ( %g", p->avgE[0]);
    for( i = 1 ; i < sNo ; i++ ){
        fprintf(logFP, ", %g", p->avgE[i]);
    }
    fprintf(logFP, " ) \n");
    fprintf(logFP, " pi: ( %g", p->avgPi[0]);
    for( i = 1 ; i < sNo ; i++ ){
        fprintf(logFP, ", %g", p->avgPi[i]);
    }
    fprintf(logFP, " ) \n");
    fprintf(logFP, " A_matrix: [");
    for( i = 0 ; i < sNo ; i++ ){
        fprintf(logFP, " ( %g", p->avgA[i][0]);
        for( j = 1 ; j < sNo ; j++ )
        {   fprintf(logFP, ", %g", p->avgA[i][j]);   }
        fprintf(logFP, ")");
    }
    fprintf(logFP, " ] \n\n");
    char *fn;
    FILE *fp;
    size_t n;
    /* Room for the name plus the longest suffix: ".param" / ".maxS"
       plus up to 11 digits of sNo and the terminating NUL. */
    fn = (char*)malloc( strlen(xn->name) + 32 );
    sprintf( fn, "%s.param%03d", xn->name, sNo );
    if( (fp = fopen( fn, "w")) != NULL ){
        fprintf(fp, "I, E, pi");
        for( i = 0 ; i < sNo ; i++ )
        {   fprintf(fp, ", A%dx", i);   }
        fprintf(fp, "\n");
        for( i = 0 ; i < sNo ; i++ ){
            fprintf(fp, "%g, %g", p->avgI[i], p->avgE[i]);
            /* Note the transpose: column "A%dx" lists transitions INTO i. */
            for( j = 0 ; j < sNo ; j++ )
            {   fprintf(fp, ", %g", p->avgA[j][i]);   }
            fprintf(fp, "\n");
        }
        fclose(fp);
    }
    sprintf( fn, "%s.Lq%03d", xn->name, sNo );
    if( (fp = fopen( fn, "w")) != NULL ){
        for( n = 0 ; n < gv->iteration ; n++ ){
            fprintf( fp, "%24.20e\n", gv->LqArr[n] );
        }
        fclose(fp);
    }
    sprintf( fn, "%s.maxS%03d", xn->name, sNo );
    if( (fp = fopen( fn, "w")) != NULL ){
        for( n = 0 ; n < xn->N ; n++ ){
            fprintf( fp, "%d\n", iv->stateTraj[n] );
        }
        fclose(fp);
    }
    free( fn );
}
//void outputPcFretResultsG( xns, gv, ivs, logFP )
//xnDataBundle *xns;
//globalVars *gv;
//indVarBundle *ivs;
//FILE *logFP;
//{
//}
//
|
kthvalue_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
template <typename T, typename Type>
static void getKthvalue(Type input_height, Type input_width, int input_dim,
const framework::Tensor* input, T* t_out,
Type* t_indices, const int& k) {
bool partial_sort_flag = (k * 64) < input_width;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
std::vector<std::pair<T, Type>> col_vec;
col_vec.reserve(input_width);
if (input_dim == 1) {
auto e_input = framework::EigenVector<T>::Flatten(*input);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
}
} else {
auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
}
}
if (partial_sort_flag) {
std::partial_sort(
col_vec.begin(), col_vec.begin() + k, col_vec.end(),
[](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
return (!std::isnan(static_cast<double>(l.first)) &&
std::isnan(static_cast<double>(r.first))) ||
(l.first < r.first);
});
} else {
std::nth_element(
col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
[](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
return (!std::isnan(static_cast<double>(l.first)) &&
std::isnan(static_cast<double>(r.first))) ||
(l.first < r.first);
});
}
t_out[i] = col_vec[k - 1].first;
t_indices[i] = col_vec[k - 1].second;
}
}
// Backward scatter for kthvalue: for each of the `input_height` rows,
// copies the single gradient value from `input` into `output_data` at
// the column recorded in `indices` during the forward pass. All other
// entries of `output_data` must already be zeroed by the caller.
template <typename T, typename Type>
static void kthvalueAssign(const Type& input_height, const Type& input_width,
                           const int& input_dim, const framework::Tensor* input,
                           const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      // Rank-1 case: a single row, so a single scatter.
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      output_data[i * input_width + e_indices(0)] = e_input(0);
    } else {
      // General case: one value/index per row i of the flattened matrix.
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
    }
  }
}
// CPU forward kernel for the kthvalue op: writes the k-th smallest
// value along `axis` of X (and its index) to Out / Indices.
// Fast path: when `axis` is already the last dimension, the selection
// runs directly on X. Otherwise X is transposed so the target axis
// becomes last, the selection runs on the transposed copy, and the
// results are transposed back.
template <typename DeviceContext, typename T>
class KthvalueCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("X");
    auto* output = context.Output<framework::Tensor>("Out");
    auto* indices = context.Output<framework::Tensor>("Indices");
    const auto& in_dims = input->dims();
    int k = static_cast<int>(context.Attr<int>("k"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    // Negative axis counts from the back, as in numpy.
    if (axis < 0) axis += in_dims.size();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
    auto out_dims = output->dims();
    if (axis == in_dims.size() - 1) {
      // Fast path: treat X as (input_height x input_width) rows.
      const int64_t& input_height =
          pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(), input,
                              output_data, indices_data, k);
    } else {
      // Build the permutation that swaps `axis` with the last dim.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);
      if (!keepdim) {
        // Temporarily resize outputs to keepdim shape (size-1 reduced
        // axis) so the transposes below line up; restored at the end.
        std::vector<int> tmp_out_shape;
        for (int i = 0; i < axis; i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        tmp_out_shape.emplace_back(1);
        for (int i = axis + 1; i < in_dims.size(); i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        framework::DDim tmp_out_dims = pten::make_ddim(tmp_out_shape);
        output->Resize(tmp_out_dims);
        indices->Resize(tmp_out_dims);
      }
      framework::DDim trans_dims(in_dims);
      framework::DDim trans_out_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
        trans_out_dims[i] = in_dims[trans[i]];
      }
      trans_out_dims[in_dims.size() - 1] = 1;
      // Transpose X so the selection axis is last, select, then
      // transpose the value/index results back into place.
      framework::Tensor trans_inp;
      trans_inp.mutable_data<T>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_inp, trans);
      const int64_t input_height =
          pten::product(pten::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];
      framework::Tensor tmp_out, tmp_indices;
      T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace());
      auto* t_ind =
          tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace());
      getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(),
                              &trans_inp, t_out, t_ind, k);
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans);
      if (!keepdim) {
        // Drop the size-1 axis again.
        output->Resize(out_dims);
        indices->Resize(out_dims);
      }
    }
  }
};
// CPU backward kernel for kthvalue: routes each upstream gradient value
// back to the position that was selected in the forward pass (via the
// saved Indices); every other entry of dX is zero.
// Mirrors the forward structure: a direct scatter when `axis` is last,
// otherwise scatter in the transposed layout and transpose back.
template <typename DeviceContext, typename T>
class KthvalueGradCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out_grad =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<framework::Tensor>("Indices");
    auto* x_grad =
        context.Output<framework::Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
    auto in_dims = x->dims();
    auto out_dims = indices->dims();
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    if (!keepdim) {
      // Recreate the keepdim shape (size-1 reduced axis) so the
      // gradient/index tensors can be reshaped consistently below.
      std::vector<int> tmp_out_shape;
      for (int i = 0; i < axis; i++) {
        tmp_out_shape.emplace_back(out_dims[i]);
      }
      tmp_out_shape.emplace_back(1);
      for (int i = axis + 1; i < in_dims.size(); i++) {
        tmp_out_shape.emplace_back(out_dims[i - 1]);
      }
      out_dims = pten::make_ddim(tmp_out_shape);
    }
    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis == in_dims.size() - 1) {
      // Fast path: zero dX, then scatter the per-row gradients.
      const int64_t input_height =
          pten::product(pten::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      if (keepdim) {
        kthvalueAssign(input_height, input_width, in_dims.size(), out_grad,
                       indices, x_grad_data);
      } else {
        // Copy first: the inputs are const, but the scatter helper
        // needs tensors carrying the keepdim shape.
        auto& dev_context =
            context.template device_context<platform::CPUDeviceContext>();
        framework::Tensor out_grad_tmp, indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        kthvalueAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
                       &indices_tmp, x_grad_data);
      }
    } else {
      // Same permutation as the forward pass: move `axis` to the end.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);
      framework::DDim trans_dims(out_dims);
      framework::DDim trans_in_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = out_dims[trans[i]];
        trans_in_dims[i] = in_dims[trans[i]];
      }
      // Transposed copies of the gradient and the saved indices.
      framework::Tensor trans_dO, trans_ind;
      trans_dO.mutable_data<T>(trans_dims, context.GetPlace());
      trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      if (keepdim) {
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, *out_grad, &trans_dO, trans);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, *indices, &trans_ind, trans);
      } else {
        // Reshape to the keepdim shape before transposing (copies are
        // needed because the op inputs are const).
        framework::Tensor out_grad_tmp, indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, out_grad_tmp, &trans_dO, trans);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, indices_tmp, &trans_ind, trans);
      }
      // Scatter in the transposed layout, then transpose dX back.
      const int64_t input_height = pten::product(
          pten::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
      const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace());
      memset(t_out, 0, x_grad->numel() * sizeof(T));
      kthvalueAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                                 &trans_dO, &trans_ind, t_out);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans);
    }
  }
};
} // namespace operators
} // namespace paddle
|
mkl_quantized_conv_ops.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
// Returns the real-valued width represented by one quantized step of
// type T across the float range [range_min, range_max].
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
  int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest());
  int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest());
  // Use a symmetric integer range, e.g. [-127, 127] rather than [-128, 127].
  if (lowest < -highest) ++lowest;
  return (range_max - range_min) / static_cast<float>(highest - lowest);
}
// Computes the float range [*min_c, *max_c] that the product c = a * b
// can cover, given the float ranges of the two quantized factors
// (types T1, T2) and the quantized result type T3.
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           float min_b, float max_b,
                                           float* min_c, float* max_c) {
  const float level_a = MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
  const float level_b = MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
  // One quantized level of the product spans the product of the levels.
  const float level_c = level_a * level_b;
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  *min_c = level_c * c_lowest;
  *max_c = level_c * c_highest;
}
// Per-channel variant: for each channel n, computes the min/max float
// range of the product c = a * b from the shared range of a and the
// per-channel ranges of b. min_c_vector/max_c_vector must be
// preallocated with the same number of elements as min_b_vector.
// Improvement: the quantized-level width of `a` does not depend on the
// channel, so it is now computed once before the parallel loop instead
// of once per iteration.
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           const Tensor& min_b_vector,
                                           const Tensor& max_b_vector,
                                           Tensor** min_c_vector,
                                           Tensor** max_c_vector) {
  DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
  DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
  size_t n_channel = min_b_vector.NumElements();
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  const float* min_b = min_b_vector.flat<float>().data();
  const float* max_b = max_b_vector.flat<float>().data();
  float* min_c = (*min_c_vector)->flat<float>().data();
  float* max_c = (*max_c_vector)->flat<float>().data();
  // Loop-invariant: hoisted out of the parallel region.
  const float a_float_for_one_quant_level =
      MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
#pragma omp parallel for
  for (size_t n = 0; n < n_channel; ++n) {
    const float b_float_for_one_quant_level =
        MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
    const float c_float_for_one_quant_level =
        a_float_for_one_quant_level * b_float_for_one_quant_level;
    min_c[n] = c_float_for_one_quant_level * c_lowest;
    max_c[n] = c_float_for_one_quant_level * c_highest;
  }
}
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
|
26_omp_heap.c | // clang-format off
// RUN: %run %s --omp 2>&1 | FileCheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s --omp 2>&1 | FileCheck %s
// REQUIRES: openmp && softcounter
// clang-format on
#include <stdlib.h>
// Performs n cycles of allocating and immediately freeing a heap array
// of n doubles; used to stress heap tracking from concurrent OpenMP
// sections.
// Fix: the loop counter is now unsigned to match the parameter type;
// the previous int index caused a signed/unsigned comparison and would
// overflow (undefined behavior) for n > INT_MAX.
void repeat_alloc_free(unsigned n) {
  for (unsigned i = 0; i < n; i++) {
    double* d = (double*)malloc(sizeof(double) * n);
    free(d);
  }
}
// Spawns three OpenMP sections that each perform n alloc/free cycles;
// the FileCheck lines below pin the expected TypeART accounting
// (3 * n = 3000 heap allocations and frees of type double) and the
// absence of ThreadSanitizer findings or errors.
int main(int argc, char** argv) {
  const int n = 1000;
  // CHECK: [Trace] TypeART Runtime Trace
#pragma omp parallel sections
  {
#pragma omp section
    repeat_alloc_free(n);
#pragma omp section
    repeat_alloc_free(n);
#pragma omp section
    repeat_alloc_free(n);
  }
  // CHECK-TSAN-NOT: ThreadSanitizer
  // CHECK-NOT: Error
  // CHECK: Allocation type detail (heap, stack, global)
  // CHECK: 6 : 3000 , 0 , 0 , double
  // CHECK: Free allocation type detail (heap, stack)
  // CHECK: 6 : 3000 , 0 , double
  return 0;
} |
HDF5SubdomainDumperMPI.h | //
// HDF5SubdomainDumperMPI.h
// Cubism
//
// Created by Fabian Wermelinger 2018-08-03
// Copyright 2018 ETH Zurich. All rights reserved.
//
#ifndef HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL
#define HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL
#include <cassert>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <mpi.h>
#include "HDF5Dumper.h"
CUBISM_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// helpers
namespace SubdomainTypesMPI
{
/// MPI-aware subdomain descriptor.  Extends the serial
/// SubdomainTypes::Subdomain with file offsets and a globally reduced
/// maximum element count, computed over the Cartesian MPI topology of the
/// grid.  Used by DumpSubdomainHDF5MPI to place each rank's hyperslab.
template <typename TGrid>
class Subdomain : public SubdomainTypes::Subdomain<TGrid>
{
public:
    /// Forward entity construction to the serial base-class implementation.
    template <typename TSubdomain>
    static std::vector<TSubdomain> getEntities(ArgumentParser& parser, TGrid& grid)
    {
        return SubdomainTypes::Subdomain<TGrid>::template getEntities<TSubdomain>(parser, grid);
    }

public:
    typedef TGrid GridType;

    // bb_start: cell index within which the bounding box start (lower left) lies
    // bb_end: cell index within which the bounding box end (upper right) lies
    // Note: the array parameters decay to pointers, so the "=0" defaults are
    // null pointers (handled by the base class), not zero-filled arrays.
    Subdomain(TGrid* grid, const int id,
              const double start[3], const double end[3], const double* h[3],
              const int bb_start[3]=0, const int bb_end[3]=0) :
        SubdomainTypes::Subdomain<TGrid>(grid, id, start, end, h, bb_start, bb_end),
        m_suboffset{0}   // must be zero: MPI_Exscan below leaves rank 0 untouched
    {
        int myrank;
        // Split the cartesian communicator by validity: ranks that intersect
        // the subdomain (m_valid) form one sub-communicator, the rest another.
        int color = static_cast<int>( this->m_valid );
        MPI_Comm comm = this->m_grid->getCartComm();
        MPI_Comm subcomm;
        MPI_Comm_rank(comm, &myrank);
        MPI_Comm_split(comm, color, myrank, &subcomm);

        int pe_coords[3];
        this->m_grid->peindex(pe_coords);

        // compute offsets
        this->m_max_size = 1;
        unsigned long g_max_size = 0;
        if (this->m_valid)
        {
            // 1. determine dimension and create cartesian sub-communicator
            // Broadcast the coordinates of subcomm rank 0 (the lowest global
            // rank among valid processes, since the split is keyed by myrank)
            // and subtract them, so coordinates are zero-based within the
            // valid sub-grid.
            int pe_shift[3] = {
                pe_coords[0],
                pe_coords[1],
                pe_coords[2]
            };
            MPI_Bcast(pe_shift, 3, MPI_INT, 0, subcomm);
            for (int i = 0; i < 3; ++i)
                pe_coords[i] -= pe_shift[i];

            int pe_subdims[3];
            for (int i = 0; i < 3; ++i)
            {
                MPI_Allreduce(&pe_coords[i], &pe_subdims[i], 1, MPI_INT, MPI_MAX, subcomm);
                pe_subdims[i] += 1; // shift from index to dimension space
            }

            MPI_Comm subcartcomm;
            // NOTE(review): {true} sets periodic[0]=1 only; [1] and [2] are
            // zero-initialized (non-periodic).  Confirm whether all three
            // dimensions were meant to be periodic.  Periodicity does not
            // affect the Exscan offsets below, only the topology metadata.
            int periodic[3] = {true};
            MPI_Cart_create(subcomm, 3, pe_subdims, periodic, false, &subcartcomm);

            // 2. compute file offsets using reduced 1D communicators
            int subdims[][3] = {
                {true, false, false},
                {false, true, false},
                {false, false, true}
            };
            for (int i = 0; i < 3; ++i)
            {
                MPI_Comm dimcomm;
                MPI_Cart_sub(subcartcomm, subdims[i], &dimcomm);
                // Exclusive prefix sum of cell counts along dimension i gives
                // this rank's start index in the subdomain file layout.
                MPI_Exscan(&this->m_subcount[i], &m_suboffset[i], 1, MPI_INT, MPI_SUM, dimcomm);
                MPI_Comm_free(&dimcomm);
            }
            MPI_Comm_free(&subcartcomm);

            // 3. reduce maximum element size of subdomain to all
            // others in the sub-communicator
            for (int i = 0; i < 3; ++i)
                this->m_max_size *= static_cast<unsigned long>( this->m_subcount[i] );
            MPI_Allreduce(&(this->m_max_size), &g_max_size, 1, MPI_UNSIGNED_LONG, MPI_MAX, subcomm);
        }
        MPI_Comm_free(&subcomm);

        // 4. update maximum size globally (invalid ranks contribute g_max_size=0)
        MPI_Allreduce(&g_max_size, &(this->m_max_size), 1, MPI_UNSIGNED_LONG, MPI_MAX, comm);
    }

    Subdomain(const Subdomain& c) = default;

    /// Reference to this rank's 3-component index offset within the
    /// subdomain's global cell layout.
    inline const int (&offset() const)[3] { return m_suboffset; }

    /// Print a human-readable summary of this subdomain to stdout.
    virtual void show(const std::string prefix="") const
    {
        std::cout << prefix << "subdomain" << this->m_id << ":" << std::endl;
        std::cout << prefix << "ID = " << this->m_id << std::endl;
        std::cout << prefix << "START = (" << this->m_start[0] << ", " << this->m_start[1] << ", " << this->m_start[2] << ")" << std::endl;
        std::cout << prefix << "END = (" << this->m_end[0] << ", " << this->m_end[1] << ", " << this->m_end[2] << ")" << std::endl;
        std::cout << prefix << "BBOX_START = (" << this->m_bbox_start[0] << ", " << this->m_bbox_start[1] << ", " << this->m_bbox_start[2] << ")" << std::endl;
        std::cout << prefix << "BBOX_END = (" << this->m_bbox_end[0] << ", " << this->m_bbox_end[1] << ", " << this->m_bbox_end[2] << ")" << std::endl;
        std::cout << prefix << "DIM = (" << this->m_subdim[0] << ", " << this->m_subdim[1] << ", " << this->m_subdim[2] << ")" << std::endl;
        std::cout << prefix << "SUBDIM = (" << this->m_subcount[0] << ", " << this->m_subcount[1] << ", " << this->m_subcount[2] << ")" << std::endl;
        std::cout << prefix << "OFFSET = (" << this->m_suboffset[0] << ", " << this->m_suboffset[1] << ", " << this->m_suboffset[2] << ")" << std::endl;
        std::cout << prefix << "MAXSIZE = " << this->m_max_size << std::endl;
        std::cout << prefix << "VALID = " << this->m_valid << std::endl;
        std::cout << prefix << "NUMBER OF BLOCKS = " << this->m_intersecting_blocks.size() << std::endl;
    }

protected:
    int m_suboffset[3]; // index offset for my subdomain
};
}
///////////////////////////////////////////////////////////////////////////////
// Dumpers
//
// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate : Data access methods for read and write
// TStreamer::getAttributeName : Attribute name of the date ("Scalar", "Vector", "Tensor")
/// Collectively write the cell data of an MPI subdomain to an HDF5 file,
/// plus an XDMF wrapper for visualization tools.
///
/// Phases: (1) rank 0 serially creates the file and writes the rectilinear
/// mesh vertex vectors; (2) all ranks reopen the file with the MPI-IO driver
/// and write their hyperslab of the "data" dataset collectively (ranks that
/// do not intersect the subdomain select empty file/memory spaces but still
/// participate in the collective call); (3) rank 0 writes the .xmf file.
///
/// @param subdomain  Subdomain descriptor (provides grid, offsets, bbox).
/// @param stepID     Step identifier.  NOTE(review): currently unused in the
///                   function body — presumably fname already encodes it.
/// @param t          Simulation time stored in the XDMF <Time> element.
/// @param fname      Base file name (no extension); "_subdomain<id>" appended.
/// @param dpath      Output directory.
/// @param bXMF       Whether rank 0 also emits the .xmf wrapper.
template<typename TStreamer, typename hdf5Real, typename TSubdomain>
void DumpSubdomainHDF5MPI(const TSubdomain& subdomain,
                          const int stepID,
                          const typename TSubdomain::GridType::Real t,
                          const std::string &fname,
                          const std::string &dpath = ".",
                          const bool bXMF = true)
{
#ifdef CUBISM_USE_HDF
    typedef typename TSubdomain::GridType::BlockType B;

    int rank;

    // fname is the base filepath tail without file type extension and
    // additional identifiers
    std::ostringstream filename;
    std::ostringstream fullpath;
    filename << fname << "_subdomain" << subdomain.id();
    fullpath << dpath << "/" << filename.str();

    MPI_Comm comm = subdomain.getGrid()->getCartComm();
    MPI_Comm_rank(comm, &rank);

    herr_t status;
    hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id;

    ///////////////////////////////////////////////////////////////////////////
    // write mesh
    // Rank 0 creates (truncates) the file and writes the three vertex
    // coordinate vectors /vx, /vy, /vz of the rectilinear mesh.
    std::vector<int> mesh_dims;
    std::vector<std::string> dset_name;
    dset_name.push_back("/vx");
    dset_name.push_back("/vy");
    dset_name.push_back("/vz");
    if (0 == rank)
    {
        H5open();
        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        file_id = H5Fcreate((fullpath.str()+".h5").c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        status = H5Pclose(fapl_id);

        for (size_t i = 0; i < 3; ++i)
        {
            const int nCells = subdomain.dim(i);
            const double* const h = subdomain.grid_spacing(i);
            // nCells+1 vertices; accumulate cell spacings starting from the
            // subdomain origin (vertices[0] is pre-filled with start(i)).
            std::vector<double> vertices(nCells+1, subdomain.start(i));
            mesh_dims.push_back(vertices.size());

            for (int j = 0; j < nCells; ++j)
                vertices[j+1] = vertices[j] + h[j];;

            hsize_t dim[1] = {vertices.size()};
            fspace_id = H5Screate_simple(1, dim, NULL);
#ifndef CUBISM_ON_FERMI
            dataset_id = H5Dcreate(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
            dataset_id = H5Dcreate2(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif
            status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vertices.data());
            status = H5Sclose(fspace_id);
            status = H5Dclose(dataset_id);
        }

        // shutdown h5 file
        status = H5Fclose(file_id);
        H5close();
    }
    // Ensure the file exists on disk before any rank reopens it in parallel.
    MPI_Barrier(comm);

    ///////////////////////////////////////////////////////////////////////////
    // startup file
    // All ranks reopen the same file with the MPI-IO file driver.
    H5open();
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    status = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); if(status<0) H5Eprint1(stdout);
    file_id = H5Fopen((fullpath.str()+".h5").c_str(), H5F_ACC_RDWR, fapl_id);
    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);

    ///////////////////////////////////////////////////////////////////////////
    // write data
    std::vector<BlockInfo> infos_sub = subdomain.getBlocksInfo();

    static const unsigned int NCHANNELS = TStreamer::NCHANNELS;
    // NX/NY/NZ: local cell counts on this rank; DX/DY/DZ: global subdomain size.
    const unsigned int NX = subdomain.count()[0];
    const unsigned int NY = subdomain.count()[1];
    const unsigned int NZ = subdomain.count()[2];
    const unsigned int DX = subdomain.dim()[0];
    const unsigned int DY = subdomain.dim()[1];
    const unsigned int DZ = subdomain.dim()[2];

    if (rank==0)
    {
        std::cout << "Allocating " << (subdomain.max_size() * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.) << " MB of HDF5 subdomain data";
        std::cout << " (Total " << (DX * DY * DZ * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.) << " MB)" << std::endl;
    }

    // Dataset layout is [Z][Y][X][channel]; offsets are therefore reversed
    // relative to the (x,y,z) component order of subdomain.offset().
    hsize_t count[4] = { NZ, NY, NX, NCHANNELS };
    hsize_t dims[4] = { DZ, DY, DX, NCHANNELS };
    hsize_t offset[4] = {
        static_cast<hsize_t>(subdomain.offset()[2]),
        static_cast<hsize_t>(subdomain.offset()[1]),
        static_cast<hsize_t>(subdomain.offset()[0]),
        0
    };

    hdf5Real * array_all = NULL;
    if (subdomain.valid())
    {
        // Gather this rank's intersecting cells into a contiguous buffer.
        array_all = new hdf5Real[NX * NY * NZ * NCHANNELS];

        const int bbox_start[3] = {
            subdomain.bbox_start()[0],
            subdomain.bbox_start()[1],
            subdomain.bbox_start()[2]
        };
        const int bbox_end[3] = {
            subdomain.bbox_end()[0],
            subdomain.bbox_end()[1],
            subdomain.bbox_end()[2]
        };

#pragma omp parallel for
        for(int i=0; i<(int)infos_sub.size(); i++)
        {
            BlockInfo& info = infos_sub[i];
            const B& b = *(B*)info.ptrBlock;
            const int idx[3] = { info.index[0], info.index[1], info.index[2] };

            for(int iz=0; iz<static_cast<int>(B::sizeZ); iz++)
                for(int iy=0; iy<static_cast<int>(B::sizeY); iy++)
                    for(int ix=0; ix<static_cast<int>(B::sizeX); ix++)
                    {
                        // cell local check: continue if the cell does not
                        // intersect the subdomain bounding box.
                        int gx = idx[0]*B::sizeX + ix;
                        int gy = idx[1]*B::sizeY + iy;
                        int gz = idx[2]*B::sizeZ + iz;
                        const bool b_containedX = (bbox_start[0] <= gx) && (gx <= bbox_end[0]);
                        const bool b_containedY = (bbox_start[1] <= gy) && (gy <= bbox_end[1]);
                        const bool b_containedZ = (bbox_start[2] <= gz) && (gz <= bbox_end[2]);
                        if (!(b_containedX && b_containedY && b_containedZ))
                            continue;

                        hdf5Real output[NCHANNELS];
                        for(unsigned int j=0; j<NCHANNELS; ++j)
                            output[j] = 0;

                        TStreamer::operate(b, ix, iy, iz, (hdf5Real*)output);

                        // shift the indices to subdomain index space
                        gx -= bbox_start[0];
                        gy -= bbox_start[1];
                        gz -= bbox_start[2];

                        hdf5Real * const ptr = array_all + NCHANNELS*(gx + NX * (gy + NY * gz));
                        for(unsigned int j=0; j<NCHANNELS; ++j)
                            ptr[j] = output[j];
                    }
        }
    }

    // Collective write: every rank must call H5Dwrite; non-intersecting
    // ranks deselect both spaces so they contribute nothing.
    fapl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(fapl_id, H5FD_MPIO_COLLECTIVE);
    fspace_id = H5Screate_simple(4, dims, NULL);
#ifndef CUBISM_ON_FERMI
    dataset_id = H5Dcreate(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
    dataset_id = H5Dcreate2(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif

    fspace_id = H5Dget_space(dataset_id);
    H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
    mspace_id = H5Screate_simple(4, count, NULL);
    if (!subdomain.valid())
    {
        H5Sselect_none(fspace_id);
        H5Sselect_none(mspace_id);
    }
    status = H5Dwrite(dataset_id, get_hdf5_type<hdf5Real>(), mspace_id, fspace_id, fapl_id, array_all);
    if (status < 0) H5Eprint1(stdout);

    status = H5Sclose(mspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout);
    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);
    status = H5Fclose(file_id); if(status<0) H5Eprint1(stdout);
    H5close();

    if (subdomain.valid())
        delete [] array_all;

    // XDMF wrapper referencing the HDF5 mesh and data (rank 0 only;
    // mesh_dims was filled on rank 0 above).
    if (bXMF && rank==0)
    {
        FILE *xmf = 0;
        xmf = fopen((fullpath.str()+".xmf").c_str(), "w");
        fprintf(xmf, "<?xml version=\"1.0\" ?>\n");
        fprintf(xmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n");
        fprintf(xmf, "<Xdmf Version=\"2.0\">\n");
        fprintf(xmf, " <Domain>\n");
        fprintf(xmf, "   <Grid GridType=\"Uniform\">\n");
        fprintf(xmf, "     <Time Value=\"%e\"/>\n\n", t);
        fprintf(xmf, "     <Topology TopologyType=\"3DRectMesh\" Dimensions=\"%d %d %d\"/>\n\n", mesh_dims[2], mesh_dims[1], mesh_dims[0]);
        fprintf(xmf, "     <Geometry GeometryType=\"VxVyVz\">\n");
        fprintf(xmf, "       <DataItem Name=\"mesh_vx\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[0]);
        fprintf(xmf, "        %s:/vx\n",(filename.str()+".h5").c_str());
        fprintf(xmf, "       </DataItem>\n");
        fprintf(xmf, "       <DataItem Name=\"mesh_vy\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[1]);
        fprintf(xmf, "        %s:/vy\n",(filename.str()+".h5").c_str());
        fprintf(xmf, "       </DataItem>\n");
        fprintf(xmf, "       <DataItem Name=\"mesh_vz\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[2]);
        fprintf(xmf, "        %s:/vz\n",(filename.str()+".h5").c_str());
        fprintf(xmf, "       </DataItem>\n");
        fprintf(xmf, "     </Geometry>\n\n");
        fprintf(xmf, "     <Attribute Name=\"data\" AttributeType=\"%s\" Center=\"Cell\">\n", TStreamer::getAttributeName());
        fprintf(xmf, "       <DataItem Dimensions=\"%d %d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",(int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)sizeof(hdf5Real));
        fprintf(xmf, "        %s:/data\n",(filename.str()+".h5").c_str());
        fprintf(xmf, "       </DataItem>\n");
        fprintf(xmf, "     </Attribute>\n");
        fprintf(xmf, "   </Grid>\n");
        fprintf(xmf, " </Domain>\n");
        fprintf(xmf, "</Xdmf>\n");
        fclose(xmf);
    }
#else
#warning USE OF HDF WAS DISABLED AT COMPILE TIME
#endif
}
CUBISM_NAMESPACE_END
#endif /* HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL */
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extract the 'mean' from the image and adjust the image
% to try to make its gamma appropriate.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoGammaImage() measures the mean intensity of the image and applies the
  gamma that maps that mean onto mid-gray (0.5).  With the default channel
  mask a single gamma is applied to all channels at once; otherwise each
  channel flagged for update receives its own independently measured gamma.
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  const double
    mid_gray_log=log(0.5);

  double
    channel_gamma,
    channel_mean,
    sans;

  MagickStatusType
    result;

  ssize_t
    n;

  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&channel_mean,&sans,exception);
      channel_gamma=log(channel_mean*QuantumScale)/mid_gray_log;
      return(LevelImage(image,0.0,(double) QuantumRange,channel_gamma,
        exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  result=MagickTrue;
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
  {
    ChannelType
      saved_mask;

    PixelChannel channel = GetPixelChannelChannel(image,n);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    /* isolate this channel, level it, then restore the original mask */
    saved_mask=SetImageChannelMask(image,(ChannelType) (1UL << n));
    result=GetImageMean(image,&channel_mean,&sans,exception);
    channel_gamma=log(channel_mean*QuantumScale)/mid_gray_log;
    result&=LevelImage(image,0.0,(double) QuantumRange,channel_gamma,
      exception);
    (void) SetImageChannelMask(image,saved_mask);
    if (result == MagickFalse)
      break;
  }
  return(result != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoLevelImage() stretches the image's minimum and maximum channel values
  to the full quantum range: no black/white point clipping (0.0, 0.0) and a
  neutral mid-point gamma of 1.0.
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  const MagickBooleanType
    status=MinMaxStretchImage(image,0.0,0.0,1.0,exception);

  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BrightnessContrastImage() maps the brightness/contrast percentages onto a
  linear transfer function (slope and intercept) and delegates the per-pixel
  work to FunctionImage() with a degree-1 polynomial.

  Fix: the progress tag text read "BrightnessContast/Image" (missing 'r');
  corrected to "BrightnessContrast/Image".  The macro name itself is kept
  unchanged to avoid breaking any later reference in this file.
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContrast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /* contrast percent in [-100,100] -> slope via tan(); 0% yields slope 1.0 */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  /* intercept shifts the line so brightness pivots about mid-range */
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClutImage() builds a MaxMap+1 entry lookup table by interpolating pixels
  along the diagonal of the CLUT image, then remaps each updatable channel
  of every pixel through that table.  Rows are processed in parallel under
  OpenMP; per-row failures set the shared status flag.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* applying a color CLUT to a gray image requires an RGB working space */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* adjust=1 keeps diagonal samples inside cell centers; 0 for integer
     interpolation which addresses cells directly */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  /* sample the CLUT image along its diagonal into the lookup table */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior failure (including a failed table build above) skips the row */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      GetPixelInfoPixel(image,q,&pixel);
      /* only channels flagged for update are remapped through the table */
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* a CLUT with alpha activates the image's alpha channel if updatable */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorDecisionListImage() parses the CCC XML (SOPNode slope/offset/power
  triplets and SATNode saturation), precomputes a per-channel transfer table
  cdl_map of MaxMap+1 entries, then applies the ASC-CDL correction to the
  colormap (PseudoClass images) and to every pixel in parallel.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  /* per-channel ASC-CDL parameters: out = (in*slope + offset)^power */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* identity defaults; note saturation defaults to 0.0, not 1.0 */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  /* parse <SOPNode>: whitespace/comma separated R G B triplets */
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  /* parse <SATNode>: single saturation scalar */
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " color_correction.saturation: %g",color_correction.saturation);
    }
  /* precompute the slope/offset/power transfer for each map entry */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.
      */
      double
        luma;

      /* NOTE(review): here luma+sat*map-luma reduces to sat*map, whereas
         the pixel loop below computes luma+sat*(map-luma).  Confirm whether
         the colormap branch is missing parentheses around (map-luma). */
      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
      image->colormap[i].green=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
      image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* blend the corrected value with luma by the saturation factor */
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Push the HSB brightness of a color toward (sign > 0) or away from
  (sign < 0) a sinusoidal S-curve: dark colors get darker, light colors
  get lighter.  Hue and saturation are left untouched.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    hue,
    s_curve,
    saturation;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /*
    Blend brightness half-way toward its sigmoidal image; the sign selects
    whether contrast is increased or reduced.
  */
  s_curve=0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0);
  brightness+=0.5*sign*(s_curve-brightness);
  if (brightness < 0.0)
    brightness=0.0;
  else
    if (brightness > 1.0)
      brightness=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"
  CacheView
    *image_view;
  int
    sign;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Delegate to the OpenCL implementation when an accelerator succeeds. */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* +1 increases contrast, -1 reduces it; fed to Contrast() below. */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;
        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image: each row is processed independently, so the
    loop parallelizes with one cache-view region per thread.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* A failure in any thread cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Route each pixel through the sigmoidal brightness adjustment. */
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
%    o black_point/white_point: the points are expressed as pixel counts in
%      the range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
  CacheView
    *image_view;
  double
    *black,
    *histogram,
    *stretch_map,
    *white;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  /* Per-channel black/white levels plus one (MaxMap+1)-bin histogram and
     transfer map per channel. */
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;
      /* With the default channel mask all channels share one histogram
         keyed on intensity; otherwise each channel histograms its own
         samples. */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels:
    walk up from 0 until the cumulative count exceeds black_point, and
    down from MaxMap until it exceeds (pixels - white_point).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;
    register ssize_t
      j;
    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values
    below the black level map to 0, above the white level to QuantumRange,
    and the span in between scales linearly.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;
      /* PerceptibleReciprocal() guards against a zero-width span. */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;
      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image: apply the per-channel transfer map to every
    pixel, row-parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Degenerate span: leave the channel unchanged. */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImage)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
/*
  Accumulate one neighborhood pixel at *r into `aggregate` if its color
  distance from the window's center pixel is below a fixed threshold;
  advances r to the next pixel.  Uses the caller's locals mean, distance,
  distance_squared, aggregate, total_weight, and pixel (the center color).
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);
  CacheView
    *enhance_view,
    *image_view;
  Image
    *enhance_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: 5x5 selective weighted mean around each pixel, reading
    from the source view and writing into the clone.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      center;
    if (status == MagickFalse)
      continue;
    /* Fetch a 5-row window with a 2-pixel border on every side. */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Offset of the window's center pixel within the 5-row buffer. */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;
      PixelInfo
        aggregate;
      register const Quantum
        *magick_restrict r;
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /* Walk the 5x5 window row by row with a fixed weight kernel. */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
      EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      /* If no neighbor qualified, the center pixel passes through. */
      if (total_weight > MagickEpsilon)
        {
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"
  CacheView
    *image_view;
  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Delegate to the OpenCL implementation when an accelerator succeeds. */
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;
        /* With synced channels all channels histogram the pixel intensity;
           otherwise each channel counts its own samples. */
        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (per-channel CDF).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;
    register ssize_t
      j;
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the entire black/white level arrays.  The previous code used
    sizeof(*black)/sizeof(*white), which is the size of a single double
    and so cleared only the first element, leaving the rest of the stack
    arrays uninitialized for channel indexes the init loop below does not
    reach.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;
    /* Normalize the CDF between its endpoints to spread intensities. */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;
      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image: apply the per-channel equalization map, row-parallel.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Skip non-updated channels and degenerate (flat) histograms. */
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EqualizeImage)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  Quantum
    *gamma_map;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A gamma of 1.0 is the identity transform; nothing to do. */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    A gamma of 0.0 leaves the map all zeros, which zeroes the updated
    channels (documented as "reduce the influence of a channel").
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image: apply the precomputed lookup table to every
    updated channel, row-parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum(q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImage)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the cumulative gamma applied to the image. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* Materialize colormapped pixels so they can be rewritten in place. */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Delegate to the OpenCL implementation when an accelerator succeeds. */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: reduce each pixel's RGB to a single intensity using
    the selected method, row-parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;
      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* Arithmetic mean of the three channels. */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of the min and max channels. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squared channel values. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Rec.601 luma is defined on gamma-encoded components. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Rec.601 luminance is defined on linear components. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          /* Rec.709 luma (the default) on gamma-encoded components. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          /* Rec.709 luminance on linear components. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square of the channel values. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImage)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /* Luminance methods produce linear gray; luma methods encoded gray. */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  /* Position of a color inside the Hald cube; integer part selects the
     lattice cell, fractional part the interpolation weights. */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Infer the Hald order from the image dimensions: smallest 'level' with
    level^3 >= min(columns,rows); squaring it yields the color-cube edge
    length and 'cube_size' the span of one blue slice.
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Locate this pixel's color in the cube, then split into the base
        lattice offset and the fractional blend weights.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear interpolation: blend two samples along green on the near
        blue slice (pixel3), two on the far slice (pixel4), then blend the
        slices along blue (pixel).
        NOTE(review): 'status' is shared across OpenMP threads and is
        overwritten (not AND-ed) by each interpolation call — presumed
        benign upstream, confirm.
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;  /* advance to the next blue slice */
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /*
        Store only channels flagged for update; black additionally requires
        CMYK, alpha requires an alpha trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  /*
    Map 'pixel' from the [black_point,white_point] interval onto the full
    quantum range, applying a gamma correction of 1/gamma.  The reciprocal
    is computed safely so a degenerate (zero-width) interval cannot divide
    by zero.
  */
  const double
    span_reciprocal=PerceptibleReciprocal(white_point-black_point);

  return(QuantumRange*gamma_pow(span_reciprocal*((double) pixel-black_point),
    1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: each channel is remapped only when its traits flag
        it for update.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Remap every channel whose traits request an update.
      */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImage)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma is applied first, then the full quantum
  range is compressed into [black_point,white_point].
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: each channel is remapped only when its traits flag
        it for update.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Remap every channel whose traits request an update.
      */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is, any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    (*level_method)(Image *,const double,const double,const double,
      ExceptionInfo *);

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    LevelImage() maps the given levels to black/white; LevelizeImage() is
    the inverse mapping.  Both share the same signature, so the 'invert'
    flag simply selects which operation is applied per channel.  This
    replaces two formerly duplicated per-channel branches.
  */
  level_method=(invert == MagickFalse) ? LevelImage : LevelizeImage;
  status=MagickTrue;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,RedChannel);
      status&=level_method(image,black_color->red,white_color->red,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,GreenChannel);
      status&=level_method(image,black_color->green,white_color->green,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,BlueChannel);
      status&=level_method(image,black_color->blue,white_color->blue,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_mask=SetImageChannelMask(image,BlackChannel);
      status&=level_method(image,black_color->black,white_color->black,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    {
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      status&=level_method(image,black_color->alpha,white_color->alpha,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* NOTE(review): fetch failure silently truncates the histogram */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point levels.
    black_point/white_point are pixel counts: scan from each end until the
    cumulative count reaches the requested threshold.
    NOTE(review): the black scan excludes bin MaxMap and the white scan
    excludes bin 0 — presumably so the two boundaries cannot cross; confirm.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the surviving [black,white] intensity range to the full
    quantum range via LevelImage().
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hcl[3];

  /*
    Adjust the pixel in HCL space: shift hue, scale chroma and luma;
    percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHCL(*red,*green,*blue,hcl+0,hcl+1,hcl+2);
  hcl[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hcl[1]*=0.01*percent_chroma;
  hcl[2]*=0.01*percent_luma;
  ConvertHCLToRGB(hcl[0],hcl[1],hcl[2],red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hclp[3];

  /*
    Adjust the pixel in HCLp space: shift hue, scale chroma and luma;
    percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHCLp(*red,*green,*blue,hclp+0,hclp+1,hclp+2);
  hclp[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hclp[1]*=0.01*percent_chroma;
  hclp[2]*=0.01*percent_luma;
  ConvertHCLpToRGB(hclp[0],hclp[1],hclp[2],red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    hsb[3];

  /*
    Adjust the pixel in HSB space: shift hue, scale saturation and
    brightness; percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHSB(*red,*green,*blue,hsb+0,hsb+1,hsb+2);
  hsb[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hsb[1]*=0.01*percent_saturation;
  hsb[2]*=0.01*percent_brightness;
  ConvertHSBToRGB(hsb[0],hsb[1],hsb[2],red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hsi[3];

  /*
    Adjust the pixel in HSI space: shift hue, scale saturation and
    intensity; percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHSI(*red,*green,*blue,hsi+0,hsi+1,hsi+2);
  hsi[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hsi[1]*=0.01*percent_saturation;
  hsi[2]*=0.01*percent_intensity;
  ConvertHSIToRGB(hsi[0],hsi[1],hsi[2],red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hsl[3];

  /*
    Adjust the pixel in HSL space: shift hue, scale saturation and
    lightness; percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHSL(*red,*green,*blue,hsl+0,hsl+1,hsl+2);
  hsl[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hsl[1]*=0.01*percent_saturation;
  hsl[2]*=0.01*percent_lightness;
  ConvertHSLToRGB(hsl[0],hsl[1],hsl[2],red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hsv[3];

  /*
    Adjust the pixel in HSV space: shift hue, scale saturation and value;
    percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHSV(*red,*green,*blue,hsv+0,hsv+1,hsv+2);
  hsv[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hsv[1]*=0.01*percent_saturation;
  hsv[2]*=0.01*percent_value;
  ConvertHSVToRGB(hsv[0],hsv[1],hsv[2],red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    hwb[3];

  /*
    Adjust the pixel in HWB space: shift hue, scale whiteness and
    blackness; percentages of 100 leave the color unchanged.
  */
  ConvertRGBToHWB(*red,*green,*blue,hwb+0,hwb+1,hwb+2);
  hwb[0]+=fmod(percent_hue-100.0,200.0)/200.0;
  hwb[1]*=0.01*percent_whiteness;
  hwb[2]*=0.01*percent_blackness;
  ConvertHWBToRGB(hwb[0],hwb[1],hwb[2],red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    lch[3];

  /*
    Adjust the pixel in LCHab space: scale luma and chroma, shift hue;
    percentages of 100 leave the color unchanged.
  */
  ConvertRGBToLCHab(*red,*green,*blue,lch+0,lch+1,lch+2);
  lch[0]*=0.01*percent_luma;
  lch[1]*=0.01*percent_chroma;
  lch[2]+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHabToRGB(lch[0],lch[1],lch[2],red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    lch[3];

  /*
    Adjust the pixel in LCHuv space: scale luma and chroma, shift hue;
    percentages of 100 leave the color unchanged.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,lch+0,lch+1,lch+2);
  lch[0]*=0.01*percent_luma;
  lch[1]*=0.01*percent_chroma;
  lch[2]+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHuvToRGB(lch[0],lch[1],lch[2],red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse "brightness[,saturation[,hue]]"; omitted terms default to 100%
    (no change).
  */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
        percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      /*
        Bug fix: this switch is kept in sync with the colormap switch
        above — it previously lacked an HSIColorspace case (HSI fell into
        the HSL default) and grouped LCHColorspace with LCHuv while the
        colormap path grouped it with LCHab.
      */
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.  In grayscale mode only gray entries
        (red == green == blue) are negated; colored entries are skipped.
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
      /*
        Grayscale mode: negate only gray pixels.  Parallelized per row the
        same way as the general path below (each row owns disjoint pixels).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          /*
            BUG FIX: skip NON-gray pixels (previously the test was inverted,
            skipping gray pixels and negating colored ones).  The corrected
            sense matches the colormap branch above and the documented
            contract: "only negate grayscale pixels within the image".
          */
          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_NegateImage)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        BUG FIX: propagate per-row failures; previously this branch returned
        MagickTrue unconditionally, hiding cache-view and progress errors.
      */
      return(status);
    }
  /*
    Negate every update-enabled channel of every pixel.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_NegateImage)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white (via ContrastStretchImage()).
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  /*
    Stretch the image contrast by delegating to ContrastStretchImage():
    pixels below the black point (0.15% of the pixel count) are mapped to
    black and pixels above the white point (the 99.95% mark) to white.
  */
  const double
    pixel_count=(double) image->columns*image->rows;

  return(ContrastStretchImage(image,pixel_count*0.0015,pixel_count*0.9995,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const double contrast,
%        const double midpoint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal: solve ScaledSigmoidal(a,b,y) == x for y.
  Note: the #if below deliberately splits a single expression AND the return
  statement, so each preprocessor branch forms its own clamp + invert pair.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /* Undo the [0,1] rescaling to recover the raw sigmoidal value. */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /*
    Clamp into the open domain of the inverse: atanh needs (-1,1), the
    logistic inverse needs (0,1).  HDRI / out-of-gamut x can fall outside.
  */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* tanh-based forward => atanh-based inverse (see Sigmoidal macro). */
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* logistic-based forward => logit-based inverse. */
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
/*
  Convenience macros: map a quantum through the (inverse) scaled sigmoidal
  curve, working in normalized [0,1] space and clamping back to quantum range.
*/
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (this also avoids the division by zero in
    ScaledSigmoidal's s(1)-s(0) denominator as contrast -> 0).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap (sharpen applies the forward curve,
    otherwise the inverse).
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image, one cache-view row per (parallel)
    iteration.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a failure in any row aborts the remaining iterations */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImage)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
symv_x_csc_n_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Symmetric CSC matrix-vector multiply, upper triangle stored:
 *   y := beta * y + alpha * A * x
 * For each column i, a strictly-upper entry (r, i) contributes both
 * A[r,i] * x[i] to y[r] and A[r,i] * x[r] to y[i]; the diagonal (if stored)
 * contributes once.  Each thread accumulates into a private y_local buffer
 * to avoid write conflicts, and the partial sums are reduced into y at the
 * end.  The inner column loop is manually unrolled by 4.
 */
static alphasparse_status_t
symv_csc_n_hi_unroll(const ALPHA_Number alpha,
                     const ALPHA_SPMAT_CSC *A,
                     const ALPHA_Number *x,
                     const ALPHA_Number beta,
                     ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;   /* symmetric: m == n assumed */
    const ALPHA_INT num_threads = alpha_get_thread_num();

    /* y := beta * y */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], y[i], beta);
    }

    /* each thread has a zero-initialised private y_local */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT ais = A->cols_start[i];
        ALPHA_INT aie = A->cols_end[i];
        ALPHA_INT start = ais;
        /* index of the first entry in column i with row >= i */
        ALPHA_INT end = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
        /*
         * BUG FIX: guard `end < aie` before dereferencing row_indx[end].
         * When column i stores no diagonal and every stored row is < i,
         * lower_bound returns aie: reading row_indx[aie] is out of bounds
         * for the last column, and for inner columns it could mistake the
         * first entry of the NEXT column for the diagonal of this one.
         */
        if (end < aie && A->row_indx[end] == i)
        {
            /* diagonal entry: y[i] += alpha * A[i,i] * x[i] */
            ALPHA_Number tmp;
            alpha_mul(tmp, alpha, A->values[end]);
            alpha_madde(y_local[tid][i], tmp, x[i]);
        }
        const ALPHA_INT *A_row = &A->row_indx[ais];
        const ALPHA_Number *A_val = &A->values[ais];
        ALPHA_INT ai = 0;
        ALPHA_INT ail = end - start;   /* number of strictly-upper entries */
        ALPHA_Number alpha_xi, tmp;
        alpha_mul(alpha_xi, alpha, x[i]);
        /* 4x unrolled: each entry updates y[row] and (transposed) y[i] */
        for (; ai < ail - 3; ai += 4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        /* remainder loop */
        for (; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde(y_local[tid][ar], av, alpha_xi);
            alpha_mul(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }

    /* reduce the per-thread partial results into y */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < m; col++)
        for (ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }

    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Public entry point (the symbol name is generated by the ONAME macro):
 * forwards directly to the unrolled upper-triangular CSC symmetric kernel.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    const alphasparse_status_t status =
        symv_csc_n_hi_unroll(alpha, A, x, beta, y);
    return status;
}
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2018 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifndef OWL_CORE_CONV_IMPL
#define OWL_CORE_CONV_IMPL
/*
* Calculate the cache sizes and block sizes for convolution operations.
* Code heavily inspired by Eigen (http://eigen.tuxfamily.org/).
*/
#define IM2COL_THRESHOLD 512 * 1024
#define ALIGN_SIZE 32 // for AVX address alignment
/*
 * Query L1/L2/L3 cache sizes in bytes via CPUID leaf 4 (Intel deterministic
 * cache parameters).  Subleaves are enumerated until a cache type of 0 ("no
 * more caches") is reported; only data (type 1) and unified (type 3) caches
 * are counted.
 */
OWL_INLINE void query_cache_sizes_intel(int* l1p, int* l2p, int* l3p) {
  int regs[4];
  int sizes[4] = {0, 0, 0, 0};   /* sizes[1..3] hold L1..L3 */
  int cache_type = 0;

  for (int subleaf = 0; subleaf < 16; subleaf++) {
    regs[0] = regs[1] = regs[2] = regs[3] = 0;
    CPUID(regs, 0x4, subleaf);
    cache_type = (regs[0] & 0x0F);
    if (cache_type == 0) break;                 /* no further cache levels */
    if (cache_type != 1 && cache_type != 3) continue;  /* skip e.g. i-cache */
    int cache_level = (regs[0] & 0xE0) >> 5;
    int ways        = (regs[1] & 0xFFC00000) >> 22;
    int partitions  = (regs[1] & 0x003FF000) >> 12;
    int line_size   = (regs[1] & 0x00000FFF);
    int sets        = (regs[2]);
    /* all CPUID fields are encoded as (value - 1) */
    if (cache_level >= 1 && cache_level <= 3)
      sizes[cache_level] =
        (ways + 1) * (partitions + 1) * (line_size + 1) * (sets + 1);
  }

  *l1p = sizes[1];
  *l2p = sizes[2];
  *l3p = sizes[3];
}
/*
 * Query L1/L2/L3 cache sizes in bytes.  On x86/x86-64 the sizes are read
 * with CPUID when leaf 4 is available; otherwise (or on other
 * architectures) conservative defaults are returned.
 */
OWL_INLINE void query_cache_sizes(int* l1p, int* l2p, int* l3p) {
  if (OWL_ARCH_i386 || OWL_ARCH_x86_64) {
    int cpuinfo[4];
    CPUID(cpuinfo, 0x0, 0);
    /*
     * BUG FIX: CPUID leaf 0 reports the highest supported standard leaf in
     * EAX, i.e. cpuinfo[0].  The previous code read cpuinfo[1] (EBX), which
     * holds part of the vendor string ("Genu"/"Auth") and is effectively
     * always >= 4, so the leaf-4 path was taken even on CPUs without it.
     */
    int highest_func = cpuinfo[0];
    if (highest_func >= 4)
      query_cache_sizes_intel(l1p, l2p, l3p);
    else {
      /* pre-leaf-4 x86: assume modest desktop-class cache sizes */
      *l1p = 32 * 1024;
      *l2p = 256 * 1024;
      *l3p = 2048 * 1024;
    }
  } else {
    /* non-x86 architectures: conservative defaults */
    *l1p = 16 * 1024;
    *l2p = 512 * 1024;
    *l3p = 512 * 1024;
  }
}
// The effect of calculating block size according to cache sizes is yet to be
// proved here since we use OpenBLAS GEMM directly; also, note that we
// calculate `InputMatrix x KernelMatrix`, not the other way around.
//
// Shrink the GEBP blocking factors *kp (depth), *mp, *np in place so the
// packed panels fit the cache hierarchy.  On entry the pointers hold the
// full problem dimensions; on exit the (possibly reduced) block sizes.
// `typesize` is sizeof the element type in bytes.  Heuristics adapted from
// Eigen's gemm blocking (see file header).
void compute_block_sizes(int* kp, int* mp, int* np, int typesize) {
  int l1, l2, l3;
  query_cache_sizes(&l1, &l2, &l3);
  // set the cache sizes to small numbers when debugging

  int k = *kp;
  int m = *mp;
  int n = *np;

  // small problems fit in cache anyway: keep the original sizes untouched
  if (fmaxf(k, fmaxf(m, n)) < 50) {
    return;
  }

  // register-blocking parameters: the micro-kernel works on mr x nr tiles
  int nr = 4;
  int num_reg = 16;
  int mr = num_reg / (2 * nr) * typesize;
  int k_strip = 8;
  int k_div = (mr + nr) * typesize;
  int k_sub = mr * nr * typesize;

  // depth block kc: largest multiple of k_strip such that one mr x kc and
  // one kc x nr slice (plus the accumulator) fit in L1
  const int max_kc = fmaxf(((l1 - k_sub) / k_div) & (~(k_strip - 1)), 1);
  const int old_k = k;
  if (k > max_kc) {
    // distribute k over equal-sized chunks no larger than max_kc
    k = (k % max_kc) == 0 ? max_kc
      : max_kc - k_strip * ((max_kc - 1 - (k % max_kc)) / (k_strip * (k / max_kc + 1)));
    //assert (old_k / k == old_k / max_kc);
  }

  int max_nc;
  const int actual_l2 = 1572864; // l3 for debug; otherwise 1572864
  const int lhs_bytes = m * k * typesize;
  const int rest_l1 = l1 - k_sub - lhs_bytes;
  if (rest_l1 >= nr * k * typesize) {
    // the whole lhs panel fits in L1 with room for rhs columns
    max_nc = rest_l1 / (k * typesize);
  } else {
    // otherwise size the rhs panel against (three quarters of) L2
    max_nc = (3 * actual_l2) / (4 * max_kc * typesize);
  }
  // round the column block down to a multiple of nr
  int nc = (int) (fminf(actual_l2 / (2 * k * typesize), max_nc)) & (~(nr - 1));
  if (n > nc) {
    // distribute n over equal-sized chunks no larger than nc
    n = (n % nc == 0) ? nc : (nc - nr * ((nc - (n % nc)) / (nr * (n / nc + 1))));
  } else if (old_k == k) {
    // k and n both fit: block m against the appropriate cache level
    int kn_size = k * n * typesize;
    int actual_lm = actual_l2;
    int max_mc = m;
    if (kn_size < 1024) {
      actual_lm = l1;
    } else if (l3 != 0 && kn_size <= 32768) {
      actual_lm = l2;
      max_mc = fminf(576, max_mc);
    }
    int mc = fminf(actual_lm / (3 * k * typesize), max_mc);
    if (mc > mr) {
      mc -= mc % mr;   // round down to a multiple of mr
    }
    else if (mc == 0) {
      *kp = k; *mp = m; *np = n;
      return;
    }
    m = (m % mc == 0) ? mc : (mc - mr * ((mc - (m % mc)) / (mr * (m / mc + 1))));
  }

  *kp = k; *mp = m; *np = n;
  return;
}
#endif /* OWL_CORE_CONV_IMPL */
#ifdef OWL_ENABLE_TEMPLATE
#ifdef AVX_PSIZE
/*
 * Fill in the temporary (packed) input matrix from the input tensor with
 * vectorisation.  Currently only supports the AVX instruction set.
 *
 * Fast path, valid only when in_channel % AVX_PSIZE == 0: each group of
 * AVX_PSIZE consecutive flat kernel indices then stays within one
 * (column,row) position of the input, so a whole vector can be moved at
 * once.  With reverse_mode == 0 input values are copied into the packed
 * matrix; otherwise (backward pass) the packed values are accumulated back
 * into the input tensor.  *cmk_ptr is the running write cursor into
 * output_ptr and is always advanced, so padding positions keep the zeros
 * the caller memset into the buffer.
 */
void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
  TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k,
  int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart,
  int rstart, int input_cols, int input_rows, short reverse_mode
) {
  // assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input
  // matrix can always be loaded consecutively by a step of AVX_PSIZE
  for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) {
    // decompose the flat kernel index (k + ik) into column/row/channel
    int kc = (k + ik) / kernel_ri;
    int kri = (k + ik) - kc * kernel_ri;
    int kr = kri / in_channel;
    int ki = kri - kr * in_channel;
    int input_col = kc + cstart;
    int input_row = kr + rstart;
    if (input_col < input_cols && input_col >= 0 &&
        input_row < input_rows && input_row >= 0) {
      int input_index = idx_base + input_col * input_ri
        + input_row * in_channel + ki;
      if (reverse_mode == 0) {
        /* forward: copy AVX_PSIZE input values into the packed matrix
         * (aligned store -- assumes *cmk_ptr stays AVX_PSIZE-aligned,
         * which holds because it only advances by AVX_PSIZE here) */
        AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
        AVX_STOREA(output_ptr + (*cmk_ptr), v);
      }
      else {
        /* reverse: accumulate packed values back into the input tensor */
        AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr));
        AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
        AVX_TYPE v = AVX_ADD(v1, v2);
        AVX_STOREU(input_ptr + input_index, v);
      }
    }
    /* out-of-bounds (padding) positions are skipped; the cursor still
     * advances so the pre-zeroed buffer contributes zeros there */
    *cmk_ptr += AVX_PSIZE;
  }
  return;
}
/*
 * General packer used when in_channel is not a multiple of AVX_PSIZE: copy
 * `actual_kc` values of one row of the virtual im2col matrix into the
 * packed buffer (reverse_mode == 0), or accumulate them back into the input
 * tensor (reverse_mode != 0).  The first `kc_strip` values are processed in
 * AVX_PSIZE groups, vectorising whenever a whole group provably maps to one
 * contiguous input span; the remaining values are handled scalar.
 * *cmk_ptr is the running cursor into output_ptr; it always advances, so
 * padding positions keep the caller's zeros.
 */
void ACX_FUN_LOAD (load_sub_matrix, spatial) (
  TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc,
  int k, int kernel_ri, int input_ri, int in_channel, int idx_base,
  int cstart, int rstart, int input_cols, int input_rows,
  int kernel_rows, short reverse_mode
){
  int ik = 0;
  // first, load `kc_strip` numbers with a step of AVX_PSIZE;
  // assume `kc_strip % AVX_PSIZE == 0`
  for ( ; ik < kc_strip; ik += AVX_PSIZE) {
    /* (column,row) coordinates of the first and last index in this group */
    const int cr_set[2] = {(k + ik) / in_channel,
                           (k + ik + AVX_PSIZE - 1) / in_channel};
    const int c_set[2] = {cr_set[0] / kernel_rows,
                          cr_set[1] / kernel_rows};
    const int cols[2] = {cstart + c_set[0], cstart + c_set[1]};
    // out of bounds; leave the next AVX_PSIZE numbers as 0
    if (cols[0] >= input_cols || cols[1] < 0) {
      *cmk_ptr += AVX_PSIZE;
      continue;
    }
    else if (cols[0] == cols[1]) {
      /* the whole group lies within one input column */
      const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows,
                            cr_set[1] - c_set[1] * kernel_rows};
      const int rows[2] = {rstart + r_set[0], rstart + r_set[1]};
      // out of bounds; leave the next AVX_PSIZE numbers as 0
      if (rows[0] >= input_rows || rows[1] < 0) {
        *cmk_ptr += AVX_PSIZE;
        continue;
      }
      // next AVX_PSIZE numbers can be loaded consecutively
      else if (rows[0] >= 0 && rows[1] < input_rows) {
        int ki = k + ik - cr_set[0] * in_channel;
        int input_index = idx_base + cols[0] * input_ri
          + rows[0] * in_channel + ki;
        if (reverse_mode == 0) {
          /* forward: vector copy into the packed matrix */
          AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
          AVX_STOREU(output_ptr + (*cmk_ptr), v);
        }
        else {
          /* reverse: vector accumulate back into the input tensor */
          AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr));
          AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
          AVX_TYPE v = AVX_ADD(v1, v2);
          AVX_STOREU(input_ptr + input_index, v);
        }
        *cmk_ptr += AVX_PSIZE;
        continue;
      }
    }
    // previous special cases do not apply; calculate input index one by one
    for (int ip = 0; ip < AVX_PSIZE; ip++) {
      int kc = (k + ik + ip) / kernel_ri;
      int kri = (k + ik + ip) - kc * kernel_ri;
      int kr = kri / in_channel;
      int ki = kri - kr * in_channel;
      int input_col = kc + cstart;
      int input_row = kr + rstart;
      if (input_col < input_cols && input_col >= 0 &&
          input_row < input_rows && input_row >= 0) {
        int input_index = idx_base + input_col * input_ri
          + input_row * in_channel + ki;
        if (reverse_mode == 0)
          output_ptr[*cmk_ptr] = input_ptr[input_index];
        else
          input_ptr[input_index] += output_ptr[*cmk_ptr];
      }
      *cmk_ptr += 1;
    }
  }
  // second, load the rest `actual_kc - kc_strip` numbers (scalar tail)
  for (; ik < actual_kc; ik++) {
    int kc = (k + ik) / kernel_ri;
    int kri = (k + ik) - kc * kernel_ri;
    int kr = kri / in_channel;
    int ki = kri - kr * in_channel;
    int input_col = kc + cstart;
    int input_row = kr + rstart;
    if (input_col < input_cols && input_col >= 0 &&
        input_row < input_rows && input_row >= 0) {
      int input_index = idx_base + input_col * input_ri
        + input_row * in_channel + ki;
      if (reverse_mode == 0)
        output_ptr[*cmk_ptr] = input_ptr[input_index];
      else
        input_ptr[input_index] += output_ptr[*cmk_ptr];
    }
    *cmk_ptr += 1;
  }
  return;
}
#endif /* AVX_PSIZE */
/*
 * GEBP-based implementation. See Goto et.al [08] for detail.
 *
 * 2-D spatial convolution over a batch of channel-last tensors.  The
 * virtual im2col matrix (output_crb x kernel_cri) is multiplied by the
 * kernel matrix (kernel_cri x out_channel).  Small problems materialise the
 * whole im2col matrix and call GEMM once; large problems pack cache-sized
 * tiles (GEBP) and accumulate per-tile GEMM results into the output.
 */
CAMLprim value FUN_NATIVE (spatial) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* input-dilation parameters: accepted for interface parity but unused here */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  /* flattened strides of the input/output/kernel tensors */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_cr = kernel_cols * kernel_rows;
  const int kernel_ri = kernel_rows * in_channel;

  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;

  /* SAME padding (padding != 1) centers the kernel; VALID uses no offset */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  // if generated input matrix is small enough, use im2col implementation
  // (the division check also guards against int overflow of mat_size)
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);

    /* one row of inpt2d per output position: the kernel-footprint patch */
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            /* out-of-bounds (padding) cells keep the calloc'd zeros */
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }

    GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
         output_crb, out_channel, kernel_cri, ALPHA,
         inpt2d, kernel_cri, kernel_ptr, out_channel,
         BETA, output_ptr, out_channel);

    free(inpt2d);
    return Val_unit;
  }

  /* blocked (GEBP) path */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  /* NOTE(review): compute_block_sizes declares parameters (kp, mp, np), but
   * this call passes (&kc, &nc, &mc) while spatial_backward_input passes
   * (&mc, &kc, &nc) -- both orders cannot match the declaration.  Left
   * unchanged here; verify which pairing is intended. */
  compute_block_sizes(&kc, &nc, &mc, sizeof(TYPE));

#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);

  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
      int actual_kc = fminf(k + kc, kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // iterate along each row of the generated input matrix; processing four
      // rows in parallel with the help of e.g. OpenMP should be possible
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_base = b * input_cri;
        // fill in the sub input matrix
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }

      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* BUG FIX: recompute the kernel-tile base for every column block.
         * The previous code initialised idx_kn_base = k * out_channel once
         * and then did `idx_kn_base += n` per iteration, accumulating
         * 0 + nc + 2*nc + ... and addressing the wrong tile from the third
         * iteration on (i.e. whenever out_channel > 2 * nc). */
        int idx_kn_base = k * out_channel + n;
        // fill in the kernel matrix tile (actual_kc x actual_nc)
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }

        GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
             actual_mc, actual_nc, actual_kc, ALPHA,
             temp_mk, actual_kc, temp_kn, actual_nc,
             BETA, temp_mn, actual_nc);

        /* accumulate the tile product into the output matrix */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = (ix + m) * out_channel + (iy + n);
            output_ptr[index_mn] += temp_mn[cmn++];
          }
        }
      }
    }
  }

  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/*
 * Bytecode-interpreter entry point: OCaml externals with more than five
 * arguments receive them packed in an array.  Unpack the 17 arguments and
 * forward to the native implementation; argn is the count supplied by the
 * runtime.
 */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
  return FUN_NATIVE (spatial) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/*
 * Gradient of 2D convolution with respect to the INPUT.
 *
 * output_ptr holds the incoming gradient, laid out (per the index arithmetic
 * below) as [batch][out_col][out_row][out_channel]; kernel_ptr is a row-major
 * (kernel_cri x out_channel) matrix, i.e. [k_col][k_row][in_channel][out_channel];
 * input_ptr receives the gradient as [batch][in_col][in_row][in_channel].
 * vRow_in_stride / vCol_in_stride are accepted for interface symmetry but
 * are unused here.
 *
 * Strategy: patch_grads = output_grad x kernel^T (a single GEMM when the
 * intermediate matrix is small enough, otherwise a cache-blocked GEMM via
 * compute_block_sizes), then col2im scatter-add of every patch back into
 * the input window it came from.  Always returns Val_unit; exits the
 * process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the layouts described above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;
  /* Padding implied by the given output size (SAME-style), clamped at 0. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* The gradient is accumulated with +=, so clear the destination first. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* Fast path: materialise the whole (output_crb x kernel_cri) patch-gradient
     matrix.  The division check detects signed-int overflow of mat_size. */
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    /* inpt2d = output_grad x kernel^T */
    GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
      output_crb, kernel_cri, out_channel, ALPHA,
      output_ptr, out_channel, kernel_ptr, out_channel,
      BETA, inpt2d, kernel_cri);
    /* col2im: scatter-add each patch row into its (possibly padded) window;
       positions that fall outside the input are simply skipped. */
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
            }
            ++cnt;
          }
        }
      }
    }
    free(inpt2d);
    return Val_unit;
  }
  /* Blocked path: tile the GEMM so the working set fits in cache. */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    const int idx_mn_base = m * out_channel;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
      const int idx_kn_base = k * out_channel;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* BUGFIX: this used to do `idx_kn_base += n; idx_mn_base += n;`,
           mutating the bases so the offsets accumulated across n iterations
           (0, nc, 3nc, ...) and, for idx_mn_base, leaked across k iterations
           as well — addressing the wrong tiles whenever out_channel or
           kernel_cri was split into more than one block.  Use per-iteration
           local offsets instead. */
        const int idx_kn = idx_kn_base + n;
        const int idx_mn = idx_mn_base + n;
        /* Pack the (actual_kc x actual_nc) kernel tile. */
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        /* Pack the (actual_mc x actual_nc) output-gradient tile. */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        /* temp_mk = grad_tile x kernel_tile^T */
        GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
          actual_mc, actual_kc, actual_nc, ALPHA,
          temp_mn, actual_nc, temp_kn, actual_nc,
          BETA, temp_mk, actual_kc);
        /* col2im scatter of the tile into the input gradient (the trailing
           `1` selects scatter/accumulate mode in the AVX helpers). */
        int cmk = 0;
        for (int im = 0; im < actual_mc; im += 1) {
          int b = (m + im) / output_cr;
          int cr = (m + im) - b * output_cr;
          int c = cr / output_rows;
          int r = cr - c * output_rows;
          const int cstart = c * col_stride - pc;
          const int rstart = r * row_stride - pr;
          int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
          if (fast_flag) {
            ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
              in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1);
          }
          else {
            ACX_FUN_LOAD (load_sub_matrix, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
              k, kernel_ri, input_ri, in_channel, idx_mk_base,
              cstart, rstart, input_cols, input_rows, kernel_rows, 1);
          }
#else
          for (int ik = 0; ik < actual_kc; ik += 1) {
            /* Decompose the flat kernel index into (col, row, channel);
               renamed from `kc` to avoid shadowing the blocking size above. */
            int kcol = (k + ik) / kernel_ri;
            int kri = (k + ik) - kcol * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;
            int input_col = kcol + cstart;
            int input_row = kr + rstart;
            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
              int input_index = idx_mk_base + input_col * input_ri
                + input_row * in_channel + ki;
              input_ptr[input_index] += temp_mk[cmk];
            }
            cmk++;
          }
#endif
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 16 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Gradient of 2D convolution with respect to the KERNEL.
 *
 * input_ptr is the forward input, [batch][in_col][in_row][in_channel];
 * output_ptr is the incoming gradient, [batch][out_col][out_row][out_channel];
 * kernel_ptr receives the gradient as a row-major (kernel_cri x out_channel)
 * matrix, i.e. [k_col][k_row][in_channel][out_channel].
 * vRow_in_stride / vCol_in_stride are accepted for interface symmetry but
 * are unused here.
 *
 * Strategy: kernel_grad = im2col(input)^T x output_grad.  A single GEMM is
 * used when the im2col matrix is small enough; otherwise the product is
 * computed in cache-sized blocks via compute_block_sizes and accumulated
 * into kernel_ptr.  Always returns Val_unit; exits on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the layouts described above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;
  /* Padding implied by the given output size (SAME-style), clamped at 0. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* Contributions are accumulated, so clear the destination first. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* Fast path: build the full (output_crb x kernel_cri) im2col matrix.
     The division check detects signed-int overflow of mat_size. */
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
    if (kern2d == NULL) exit(1);
    /* im2col gather; out-of-bounds (padding) positions stay zero. */
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    /* kern2d = output_grad^T x im2col, shape (out_channel x kernel_cri). */
    GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_cri, output_crb, ALPHA,
      output_ptr, out_channel, inpt2d, kernel_cri,
      BETA, kern2d, kernel_cri);
    /* Transpose into the (kernel_cri x out_channel) kernel layout. */
    int cnt = 0;
    for (int j = 0; j < kernel_cri; ++j) {
      for (int i = 0; i < out_channel; ++i) {
        kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
      }
    }
    free(inpt2d);
    free(kern2d);
    return Val_unit;
  }
  /* Blocked path: tile the GEMM so the working set fits in cache. */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    const int idx_mn_base = m * out_channel;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
      const int idx_kn_base = k * out_channel;
      /* Zero the gather buffer: padding positions are skipped below and
         must contribute zeros to the GEMM. */
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      /* im2col gather of the (actual_mc x actual_kc) input tile (the
         trailing `0` selects gather mode in the AVX helpers). */
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_mk_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          /* Decompose the flat kernel index into (col, row, channel);
             renamed from `kc` to avoid shadowing the blocking size above. */
          int kcol = (k + ik) / kernel_ri;
          int kri = (k + ik) - kcol * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kcol + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            temp_mk[cmk] = input_ptr[input_index_of:0];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* BUGFIX: this used to do `idx_mn_base += n; idx_kn_base += n;`,
           mutating the bases so the offsets accumulated across n (and, for
           idx_mn_base, across k) iterations — addressing the wrong tiles
           whenever out_channel or kernel_cri was split into more than one
           block.  Use per-iteration local offsets instead. */
        const int idx_mn = idx_mn_base + n;
        const int idx_kn = idx_kn_base + n;
        /* Pack the (actual_mc x actual_nc) output-gradient tile. */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        memset(temp_kn, 0, nc * kc * sizeof(TYPE));
        /* temp_kn = grad_tile^T x input_tile, shape (actual_nc x actual_kc). */
        GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
          actual_nc, actual_kc, actual_mc, ALPHA,
          temp_mn, actual_nc, temp_mk, actual_kc,
          BETA, temp_kn, actual_kc);
        /* BUGFIX: accumulate (+=) instead of assigning: each m-block only
           contributes the partial sum over its rows, and plain `=` discarded
           the contribution of earlier m-blocks.  kernel_ptr is zeroed at the
           top, so this is identical when there is a single m-block. */
        int cnk = 0;
        for (int jn = 0; jn < actual_nc; jn++) {
          for (int ik = 0; ik < actual_kc; ik++) {
            int index_kn = idx_kn + ik * out_channel + jn;
            kernel_ptr[index_kn] += temp_kn[cnk++];
          }
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/*
 * im2col implementation: unfold every convolution window of the input into a
 * row of a 2D matrix, then perform the whole convolution as a single GEMM.
 */
/*
 * Forward 2D convolution via plain im2col + one GEMM.
 *
 * input_ptr:  [batch][in_col][in_row][in_channel]   (per the index math below)
 * kernel_ptr: row-major (kernel_cri x out_channel) matrix
 * output_ptr: [batch][out_col][out_row][out_channel], overwritten with the result
 * vPadding selects VALID (1) vs SAME-style padding (any other value).
 * vRow_in_stride / vCol_in_stride are accepted but unused here.
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the layouts described above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  /* NOTE(review): unlike the blocked spatial_* variants, there is no
     int-overflow guard on kernel_cri * output_crb here — confirm callers
     keep this product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* padding == 1 means VALID (no padding); otherwise derive SAME-style
     padding from the requested output size, clamped at zero. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  /* im2col gather: one row of inpt2d per output position.  Iterations are
     independent, so the loop parallelises trivially.  Out-of-bounds
     (padding) positions are skipped and stay zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  /* output = im2col_matrix x kernel  (ALPHA/BETA are macro-defined GEMM
     scale factors — presumably 1 and 0; confirm in the including file). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 17 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/*
 * Gradient of 2D convolution w.r.t. the KERNEL, plain im2col variant:
 * kernel_grad = output_grad^T x im2col(input), computed with one GEMM over
 * the fully materialised im2col matrix.
 *
 * input_ptr:  [batch][in_col][in_row][in_channel]
 * output_ptr: incoming gradient, [batch][out_col][out_row][out_channel]
 * kernel_ptr: result, row-major (kernel_cri x out_channel)
 * vRow_in_stride / vCol_in_stride are accepted but unused here.
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the layouts described above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* NOTE(review): no int-overflow guard on kernel_cri * output_crb here,
     unlike the blocked spatial_backward_kernel — confirm callers keep the
     product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  /* SAME-style padding implied by the output size, clamped at zero. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* im2col gather: one row per output position; iterations independent.
     Out-of-bounds (padding) positions stay zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  /* kern2d = output_grad^T x im2col, shape (out_channel x kernel_cri). */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  /* Transpose kern2d into the (kernel_cri x out_channel) kernel layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 16 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Gradient of 2D convolution w.r.t. the INPUT, plain im2col variant:
 * patch_grads = output_grad x kernel^T (one GEMM), then col2im scatter-add
 * of each patch into the input window it came from.
 *
 * output_ptr: incoming gradient, [batch][out_col][out_row][out_channel]
 * kernel_ptr: row-major (kernel_cri x out_channel)
 * input_ptr:  result, [batch][in_col][in_row][in_channel], zeroed then
 *             accumulated with +=.
 * vRow_in_stride / vCol_in_stride are accepted but unused here.
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the layouts described above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  /* NOTE(review): no int-overflow guard on kernel_cri * output_crb here,
     unlike the blocked spatial_backward_input — confirm callers keep the
     product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Gradients are accumulated with +=, so clear the destination first. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the output size, clamped at zero. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* inpt2d = output_grad x kernel^T, shape (output_crb x kernel_cri). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  /* col2im: scatter-add each patch row into its window; positions that fall
     outside the input (padding) are skipped.  Not parallelised: different
     output positions may write the same input element. */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 16 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Forward 3D (cuboid) convolution via im2col + one GEMM.
 *
 * input_ptr:  [batch][in_col][in_row][in_dpt][in_channel] (per index math)
 * kernel_ptr: row-major (kernel_idrc x out_channel)
 * output_ptr: [batch][out_col][out_row][out_dpt][out_channel], overwritten
 * vPadding selects VALID (1) vs SAME-style padding (any other value).
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Flattened strides for the layouts described above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* NOTE(review): no int-overflow guard on kernel_idrc * output_drcb —
     confirm callers keep the product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
  INIT;
  /* padding == 1 means VALID (no padding); otherwise derive SAME-style
     padding from the requested output size, clamped at zero. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  /* im2col gather: one row of inpt2d per output voxel; iterations are
     independent.  Out-of-bounds (padding) positions stay zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat index i into (batch, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* output = im2col_matrix x kernel */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 19 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],
    argv[5],  argv[6],  argv[7],  argv[8],  argv[9],
    argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18]
  );
}
/*
 * Gradient of 3D (cuboid) convolution w.r.t. the KERNEL, im2col variant:
 * kernel_grad = output_grad^T x im2col(input), one GEMM, then a transpose
 * into the kernel layout.
 *
 * input_ptr:  [batch][in_col][in_row][in_dpt][in_channel]
 * output_ptr: incoming gradient, [batch][out_col][out_row][out_dpt][out_channel]
 * kernel_ptr: result, row-major (kernel_idrc x out_channel)
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides for the layouts described above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  /* NOTE(review): no int-overflow guard on kernel_idrc * output_drcb —
     confirm callers keep the product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  /* SAME-style padding implied by the output size, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* im2col gather: one row per output voxel; iterations independent.
     Out-of-bounds (padding) positions stay zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat index i into (batch, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* kern2d = output_grad^T x im2col, shape (out_channel x kernel_idrc). */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);
  /* Transpose kern2d into the (kernel_idrc x out_channel) kernel layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 18 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Gradient of 3D (cuboid) convolution w.r.t. the INPUT, im2col variant:
 * patch_grads = output_grad x kernel^T (one GEMM), then col2im scatter-add
 * of each patch into the input window it came from.
 *
 * output_ptr: incoming gradient, [batch][out_col][out_row][out_dpt][out_channel]
 * kernel_ptr: row-major (kernel_idrc x out_channel)
 * input_ptr:  result, [batch][in_col][in_row][in_dpt][in_channel], zeroed
 *             then accumulated with +=.
 * Always returns Val_unit; exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides for the layouts described above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* NOTE(review): no int-overflow guard on kernel_idrc * output_drcb —
     confirm callers keep the product within int range. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Gradients are accumulated with +=, so clear the destination first. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the output size, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d = output_grad x kernel^T, shape (output_drcb x kernel_idrc). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);
  /* col2im: scatter-add each patch row into its window; positions that fall
     outside the input (padding) are skipped.  Not parallelised: different
     output voxels may write the same input element. */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat index i into (batch, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-call stub: unpack the 18 boxed arguments from argv and forward
   them to the native-call implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Memory-efficient convolution (MEC) implementation: lowers the input into a
 * much smaller intermediate matrix than plain im2col by sharing overlapping
 * rows between neighbouring windows.
 */
/*
 * 2D convolution, forward pass, MEC (memory-efficient convolution) scheme.
 *
 * One row of the lowered matrix inpt2d corresponds to a (batch, output
 * column) pair and holds a padded_input_rows x kernel_cols x in_channel
 * strip of the input; the convolution is then one GEMM per output row over
 * views of inpt2d shifted by inpt2d_step (= row_stride rows of the strip).
 *
 * Column-major layouts used by the GEMM calls:
 *   inpt2d   : inpt2d_rows x inpt2d_cols, leading dimension inpt2d_rows
 *   kern2d   : kernel_cri x out_channel  (column o = weights of channel o)
 *   output2d : per output row, an inpt2d_rows x out_channel slice
 *
 * vPadding != 1 selects SAME padding (centred pr/pc offsets); otherwise
 * VALID padding with zero offsets. vRow_in_stride / vCol_in_stride are
 * decoded but not used by this implementation (no input dilation).
 * On allocation failure the process exits with status 1.
 */
CAMLprim value FUN_NATIVE (spatial_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Element strides of the flat tensors and shapes of the lowered matrices. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = input_rows * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Rows of the input strip a single (batch, col) pair can ever touch. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  /* Offset between the lowered views of two consecutive output rows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(batches * output_cri, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* SAME padding: centre the kernel window; clamp negative offsets to 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  /* Repack the kernel ([kcol][krow][in][out] layout) into kern2d so that
     column o is the contiguous kernel_cri-vector of output channel o. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }
  /* Lower the input: row i = (batch bt, output column c). Positions that
     fall outside the input stay 0 (zero padding) thanks to calloc. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }
  /* One GEMM per output row: shifted window of inpt2d times kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
         BETA, output2d + output_bco * i, inpt2d_rows);
  }
  /* Transpose the column-major GEMM result back into the output tensor. */
  cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_rows * out_channel; ++i) {
      output_ptr[cnt++] = output2d[i * inpt2d_rows + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 17-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/*
 * 2D convolution, kernel gradient, MEC scheme.
 *
 * Lowers the input exactly as in spatial_mec, repacks the incoming output
 * gradient into column-major output2d, then for each output row computes
 * kern2d += output2d(row)^T * inpt2d(row). Note the GEMM "beta" argument
 * is ALPHA (not BETA) so the per-row contributions accumulate in kern2d,
 * which starts zeroed via calloc. Finally kern2d is scattered back into
 * the kernel tensor ([kcol][krow][in][out] layout), which is zeroed first.
 *
 * Padding offsets are always computed SAME-style here (the forward pass
 * already fixed the geometry). vRow_in_stride / vCol_in_stride are decoded
 * but unused. On allocation failure the process exits with status 1.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Element strides and lowered-matrix shapes (see spatial_mec). */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* SAME-style centred padding offsets, clamped to be non-negative. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* Lower the input: row i = (batch bt, output column c); out-of-range
     positions stay 0 from calloc (zero padding). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx =
              bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }
  /* Repack the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }
  /* kern2d += output2d(row)^T * inpt2d(row); beta = ALPHA accumulates. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_cri, inpt2d_rows, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }
  /* Scatter the accumulated gradient back into the kernel tensor layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int i = 0; i < in_channel; ++i) {
        for (int o = 0; o < out_channel; ++o) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kernel_ptr[kidx] = kern2d[cnt++];
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 16-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 2D convolution, input gradient, MEC scheme.
 *
 * Inverse data flow of spatial_mec: the output gradient and the kernel are
 * repacked into column-major matrices, then for each output row the lowered
 * input gradient is computed as
 *   inpt2d(row) += output2d(row) * kern2d^T
 * (the GEMM "beta" argument is ALPHA so overlapping lowered windows
 * accumulate), and finally the lowered gradient is scatter-added back into
 * input_ptr, which is zeroed first.
 *
 * Padding offsets are always computed SAME-style here. vRow_in_stride /
 * vCol_in_stride are decoded but unused. On allocation failure the process
 * exits with status 1.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Element strides and lowered-matrix shapes (see spatial_mec). */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* The input gradient is accumulated with +=, so clear it first. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* SAME-style centred padding offsets, clamped to be non-negative. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* Repack the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }
  /* Repack the kernel so column o holds the weights of output channel o. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }
  /* inpt2d(row) += output2d(row) * kern2d^T; beta = ALPHA so overlapping
     shifted views accumulate instead of being overwritten. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
         inpt2d_rows, kernel_cri, out_channel, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         kern2d, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  /* Scatter-add the lowered gradient back into the input tensor; entries
     that fell in the zero-padding region are simply skipped. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    const int input_idx_base = bt * input_cri;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
            input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i];
          }
          counter++;
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 16-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3D (cuboid) convolution, forward pass, MEC scheme.
 *
 * 3D analogue of spatial_mec: one row of the lowered matrix inpt2d
 * corresponds to a (batch, output column, output depth) triple and holds a
 * padded_input_rows x kernel_cols x kernel_dpts x in_channel strip of the
 * input; the convolution is then one GEMM per output row over views of
 * inpt2d shifted by inpt2d_step.
 *
 * Column-major layouts for the GEMM calls:
 *   inpt2d   : inpt2d_rows x inpt2d_cols, leading dimension inpt2d_rows
 *   kern2d   : kernel_idrc x out_channel (column o = weights of channel o)
 *   output2d : per output row, an inpt2d_rows x out_channel slice
 *
 * vPadding != 1 selects SAME padding (centred pc/pr/pd offsets); otherwise
 * VALID padding with zero offsets. On allocation failure the process exits
 * with status 1.
 */
CAMLprim value FUN_NATIVE (cuboid_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Element strides of the flat tensors and lowered-matrix shapes. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Rows of the input strip one (batch, col, dpt) triple can ever touch. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  /* Offset between the lowered views of two consecutive output rows. */
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  INIT;
  /* SAME padding: centre the kernel window; clamp negative offsets to 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE));
  /* Repack the kernel ([kcol][krow][kdpt][in][out] layout) into kern2d so
     column o is the contiguous kernel_idrc-vector of output channel o. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* Lower the input: row i = (batch bt, output col ct, output depth dt).
     Out-of-range positions stay 0 from calloc (zero padding). */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* One GEMM per output row: shifted window of inpt2d times kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_idrc, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
         BETA, output2d + output_bcdo * i, inpt2d_rows);
  }
  /* Scatter the column-major GEMM result back into the output tensor
     ([batch][col][row][dpt][out] layout). */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output_ptr[oidx] = output2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 19-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17],
    argv[18]
  );
}
/*
 * 3D (cuboid) convolution, kernel gradient, MEC scheme.
 *
 * Lowers the input exactly as in cuboid_mec, repacks the incoming output
 * gradient into column-major output2d, then for each output row computes
 * kern2d += output2d(row)^T * inpt2d(row). Note the GEMM "beta" argument
 * is ALPHA (not BETA) so per-row contributions accumulate in kern2d, which
 * starts zeroed via calloc. Finally kern2d is scattered back into the
 * kernel tensor ([kcol][krow][kdpt][in][out] layout), which is zeroed
 * first. Padding offsets are always computed SAME-style here. On
 * allocation failure the process exits with status 1.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Element strides and lowered-matrix shapes (see cuboid_mec). */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
  INIT;
  /* SAME-style centred padding offsets, clamped to be non-negative. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt;
  /* Lower the input: row i = (batch bt, output col ct, output depth dt);
     out-of-range positions stay 0 from calloc (zero padding). */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* Gather the output gradient ([batch][col][row][dpt][out] layout) into
     column-major output2d, ordered by (row, out channel). */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* kern2d += output2d(row)^T * inpt2d(row); beta = ALPHA accumulates. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_idrc, inpt2d_rows, ALPHA,
         output2d + output_bcdo * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }
  /* Scatter the accumulated gradient back into the kernel tensor layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int d = 0; d < kernel_dpts; ++d) {
        for (int i = 0; i < in_channel; ++i) {
          for (int o = 0; o < out_channel; ++o) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kernel_ptr[kidx] = kern2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 18-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * 3D (cuboid) convolution, input gradient, MEC scheme.
 *
 * Inverse data flow of cuboid_mec: the output gradient and the kernel are
 * repacked into column-major matrices, then for each output row the lowered
 * input gradient is computed as
 *   inpt2d(row) += output2d(row) * kern2d^T
 * (the GEMM "beta" argument is ALPHA so overlapping lowered windows
 * accumulate), and finally the lowered gradient is scatter-added back into
 * input_ptr, which is zeroed first. Padding offsets are always computed
 * SAME-style here. On allocation failure the process exits with status 1.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Element strides and lowered-matrix shapes (see cuboid_mec). */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* The input gradient is accumulated with +=, so clear it first. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style centred padding offsets, clamped to be non-negative. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* Gather the output gradient ([batch][col][row][dpt][out] layout) into
     column-major output2d, ordered by (row, out channel). */
  int cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* Repack the kernel so column o holds the weights of output channel o. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* inpt2d(row) += output2d(row) * kern2d^T; beta = ALPHA so overlapping
     shifted views accumulate instead of being overwritten. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
         inpt2d_rows, kernel_idrc, out_channel, ALPHA,
         output2d + output_bcdo * i, inpt2d_rows,
         kern2d, kernel_idrc, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  /* Scatter-add the lowered gradient back into the input tensor; entries
     that fell in the zero-padding region are simply skipped. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode entry point: unpacks the 18-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Naive reference implementations: direct nested-loop convolution with no
 * lowering or GEMM, straightforward to audit against the optimised
 * variants above.
 */
/*
 * 2D convolution, forward pass, naive reference implementation.
 *
 * Direct nested-loop evaluation: for every (batch, output col, output row,
 * output channel) the kernel window is multiplied against the input and
 * summed. Out-of-range input positions contribute 0 (zero padding).
 *
 * vPadding != 1 selects SAME padding (centred pr/pc offsets); otherwise
 * VALID padding with zero offsets. vRow_in_stride / vCol_in_stride are
 * decoded but not used (no input dilation).
 *
 * Tensor layouts (innermost index last):
 *   input : [batch][col][row][in_channel]
 *   kernel: [kcol][krow][in_channel][out_channel]
 *   output: [batch][col][row][out_channel]
 */
CAMLprim value FUN_NATIVE (spatial_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);  /* unused: dilation not implemented */
  int col_in_stride = Long_val(vCol_in_stride);  /* unused: dilation not implemented */
  /* Element strides of the flat tensors (only the ones actually used). */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* SAME padding: centre the kernel window; clamp negative offsets to 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
        /* Top-left corner of the kernel window in input coordinates. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          TYPE sum = 0.;
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val, kernel_val;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    input_idx_base + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;  /* zero padding outside the input */
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                sum += input_val * kernel_val;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = sum;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode entry point: unpacks the 17-element argv array and delegates to
   the native stub (OCaml bytecode passes >5 arguments via an array). */
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
    argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Naive backward pass of 2d convolution w.r.t. the kernel.
 *
 * For every output position, the gradient read from output_ptr is
 * multiplied by the input values in the corresponding receptive field
 * and accumulated into kernel_ptr.  Layouts implied by the index
 * arithmetic (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 * Writes into vKernel_ptr's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Dilation arguments are part of the uniform FFI signature, but the
     naive backward pass does not implement dilation; silence -Wunused. */
  (void) row_in_stride;
  (void) col_in_stride;

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;

  /* The gradient is accumulated with +=, so the kernel buffer must start
     zeroed.  Cast to size_t before multiplying so the byte count cannot
     overflow int for large kernels. */
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */

  /* SAME-style half padding, clamped at zero (mirrors the forward pass). */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;  /* hoisted batch offset */
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Top-left input coordinate of the receptive field of output (j,k). */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        const int output_idx_base =
          i * output_cri + j * output_ri + k * out_channel;
        for (int l = 0; l < out_channel; ++l) {
          TYPE output_val = *(output_ptr + output_idx_base + l);
          for (int h = 0; h < in_channel; ++h) {
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Taps falling into the zero-padded border contribute
                   nothing to the kernel gradient — skip them. */
                if (a < 0 || a >= input_cols || b < 0 || b >= input_rows)
                  continue;
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + h;
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io +
                  h * out_channel + l;
                *(kernel_ptr + kernel_index) +=
                  output_val * *(input_ptr + input_idx);
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 16-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Naive backward pass of 2d convolution w.r.t. the input.
 *
 * For every output position, the gradient read from output_ptr is
 * multiplied by the kernel weights and scatter-added back onto the input
 * locations of the receptive field.  Layouts implied by the index
 * arithmetic (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 * Writes into vInput_ptr's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Dilation arguments are part of the uniform FFI signature, but the
     naive backward pass does not implement dilation; silence -Wunused. */
  (void) row_in_stride;
  (void) col_in_stride;

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;

  /* The gradient is accumulated with +=, so the input buffer must start
     zeroed; size_t cast keeps the byte count from overflowing int. */
  memset(input_ptr, 0, (size_t) batches * input_cri * sizeof(TYPE));
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */

  /* SAME-style half padding, clamped at zero (mirrors the forward pass). */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;  /* hoisted batch offset */
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Top-left input coordinate of the receptive field of output (j,k). */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        const int output_idx_base =
          i * output_cri + j * output_ri + k * out_channel;
        for (int l = 0; l < out_channel; ++l) {
          TYPE output_val = *(output_ptr + output_idx_base + l);
          for (int h = 0; h < in_channel; ++h) {
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Padded taps have no input cell to receive gradient;
                   skip before touching the kernel (the original loaded
                   the weight even for out-of-range taps). */
                if (a < 0 || a >= input_cols || b < 0 || b >= input_rows)
                  continue;
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io +
                  h * out_channel + l;
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + h;
                *(input_ptr + input_idx) +=
                  output_val * *(kernel_ptr + kernel_index);
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 16-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Naive direct 3d (cuboid) convolution: 7-deep loop nest, no im2col.
 *
 * Layouts implied by the index arithmetic (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 * Out-of-range taps read as zero (zero padding).  Writes into vOutput's
 * bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Precomputed strides (in elements) for each flattened tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */
  /* padding == 1 leaves all pads at zero (presumably VALID, encoded on the
     OCaml side — confirm against the caller); any other value computes
     SAME-style half padding, clamped at zero. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;  /* batch offset, hoisted */
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Near corner of the receptive field of output (j, k, d). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            TYPE sum = 0.;
            int output_idx = output_idx_base + l;
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    TYPE input_val, kernel_val;
                    /* Zero padding: out-of-range taps read as 0. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    } else {
                      input_val = 0.;
                    }
                    /* Kernel tap at offset (a-cstart, b-rstart, c-dstart). */
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    sum += input_val * kernel_val;
                  }
                }
              }
            }
            *(output_ptr + output_idx) = sum;
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 19-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17],
    argv[18]
  );
}
/* Naive backward pass of 3d (cuboid) convolution w.r.t. the kernel.
 *
 * For every output position, the gradient read from the output tensor is
 * multiplied by the input values in its receptive field and accumulated
 * into the kernel gradient.  Layouts (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 * Writes into vKernel's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Precomputed strides (in elements) for each flattened tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Gradient is accumulated with +=, so the kernel buffer must start zeroed. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */
  /* SAME-style half padding, clamped at zero (mirrors the forward pass;
     no VALID/padding flag in the backward signature). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;  /* batch offset, hoisted */
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Near corner of the receptive field of output (j, k, d). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    /* Padded taps contribute input_val = 0. */
                    TYPE input_val = 0.;
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    *(kernel_ptr + kernel_index) += output_val * input_val;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 18-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Naive backward pass of 3d (cuboid) convolution w.r.t. the input.
 *
 * For every output position, the gradient read from the output tensor is
 * multiplied by the kernel weights and scatter-added back onto the input
 * cells of its receptive field.  Layouts (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 * Writes into vInput's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Precomputed strides (in elements) for each flattened tensor. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Gradient is accumulated with +=, so the input buffer must start zeroed. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */
  /* SAME-style half padding, clamped at zero (mirrors the forward pass). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;  /* batch offset, hoisted */
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* Near corner of the receptive field of output (j, k, d). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              TYPE kernel_val;
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    /* Only in-range input cells receive gradient; padded
                       taps have no corresponding input location. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      *(input_ptr + input_idx) += output_val * kernel_val;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 18-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Dilated convolution — im2col + GEMM implementations (dilation is
 * expressed through the row/col/dpt "in_stride" arguments).
 */
/* 2d dilated convolution via im2col + one GEMM.
 *
 * Every output position becomes a row of inpt2d containing the (dilated)
 * input patch it sees, ordered [k_col][k_row][in_channel]; taps outside
 * the input stay zero (the buffer is calloc'ed, which implements zero
 * padding).  A single matrix multiply
 *   inpt2d [output_crb x kernel_cri] * kernel [kernel_cri x out_channel]
 * then produces all outputs at once.  Layouts (innermost last):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   kernel : [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][out_channel]
 */
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */

  /* Scratch patch matrix.  The (size_t) cast keeps the element count from
     overflowing int for large tensors; calloc's zero fill supplies the
     zero padding. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* out of memory — no OCaml exception raised here */
  memset(output_ptr, 0, (size_t) batches * output_cri * sizeof(TYPE));

  /* Effective kernel extent once dilation (in_stride) is applied. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* padding == 1 presumably selects VALID (no padding, encoded on the
     OCaml side); otherwise SAME-style half padding, clamped at zero. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Flat row index decomposes as i = (bt * output_cols + c) * output_rows + r. */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[(size_t) i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;  /* advance even on padded taps to keep rows aligned */
        }
      }
    }
  }
  /* output = inpt2d * kernel, all rows in one row-major GEMM. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 17-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Backward pass of 2d dilated convolution w.r.t. the kernel, via im2col.
 *
 * Rebuilds the same patch matrix as the forward pass, then computes
 *   kern2d = output^T * inpt2d   ([out_channel x kernel_cri], one GEMM)
 * and transposes it into the kernel layout
 * [kernel_cols][kernel_rows][in_channel][out_channel].
 * Writes into vKernel_ptr's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */

  /* Scratch buffers; (size_t) casts keep the element counts from
     overflowing int.  calloc zero-fills, supplying the zero padding. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* out of memory — no OCaml exception raised here */
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rio * sizeof(TYPE));

  /* Effective kernel extent once dilation is applied, and SAME-style
     half padding clamped at zero (mirrors the forward pass). */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Flat row index decomposes as i = (bt * output_cols + c) * output_rows + r. */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[(size_t) i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;  /* advance even on padded taps to keep rows aligned */
        }
      }
    }
  }
  /* kern2d = output^T * inpt2d : [out_channel x kernel_cri]. */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  /* Transpose into the kernel layout: out_channel becomes the fastest-
     varying dimension. */
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 16-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Backward pass of 2d dilated convolution w.r.t. the input, via im2col.
 *
 * One GEMM computes the per-patch gradients
 *   inpt2d = output * kernel^T   ([output_crb x kernel_cri])
 * and the loop scatter-adds each patch row back onto the (dilated)
 * input locations it was gathered from; padded taps are simply dropped.
 * Writes into vInput_ptr's bigarray data and returns Val_unit.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */

  /* Scratch patch-gradient matrix; (size_t) cast keeps the element count
     from overflowing int for large tensors. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* out of memory — no OCaml exception raised here */
  /* Gradient is scatter-added, so the input buffer must start zeroed. */
  memset(input_ptr, 0, (size_t) batches * input_cri * sizeof(TYPE));

  /* Effective kernel extent once dilation is applied, and SAME-style
     half padding clamped at zero (mirrors the forward pass). */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* inpt2d = output * kernel^T : per-output-position patch gradients. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  /* Scatter-add each patch row back onto the input.  NOTE: not
     parallelized — different rows can target the same input cell. */
  for (int i = 0; i < output_crb; ++i) {
    /* Flat row index decomposes as i = (bt * output_cols + c) * output_rows + r. */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[(size_t) i * kernel_cri + cnt];
          }
          ++cnt;  /* advance even on padded taps to keep rows aligned */
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 16-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* 3d (cuboid) dilated convolution via im2col + one GEMM.
 *
 * Every output position becomes a row of inpt2d containing the (dilated)
 * input patch it sees, ordered [k_col][k_row][k_dpt][in_channel]; taps
 * outside the input stay zero (calloc zero-fill = zero padding).  A single
 * row-major GEMM
 *   inpt2d [output_drcb x kernel_idrc] * kernel [kernel_idrc x out_channel]
 * then produces all outputs.  Layouts (innermost dimension last):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   kernel : [kernel_cols][kernel_rows][kernel_dpts][in_channel][out_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][out_channel]
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  int padding = Long_val(vPadding);
  /* Precomputed strides (in elements) for the flattened tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* Scratch patch matrix; calloc zero-fill supplies the zero padding. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);  /* out of memory — aborts the whole runtime */
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
  INIT;  /* project macro; presumably per-precision setup — defined elsewhere */
  /* Effective kernel extent once dilation (in_stride) is applied. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* padding == 1 leaves pads at zero (presumably VALID — confirm against
     the OCaml caller); otherwise SAME-style half padding, clamped at 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Flat row index decomposes as [batch][col][row][dpt]. */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    /* Near corner of the (dilated) receptive field of output (j, k, d). */
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;  /* advance even on padded taps to keep rows aligned */
          }
        }
      }
    }
  }
  /* output = inpt2d * kernel, all rows in one row-major GEMM. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-calling stub: unpack the 22-slot argv array supplied by the
   OCaml bytecode interpreter and forward to the native stub. */
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17],
    argv[18], argv[19], argv[20], argv[21]
  );
}
/*
 * Kernel (weight) gradient of a dilated 3-D (cuboid) convolution via the
 * im2col + GEMM strategy:
 *   1. expand the input into the patch matrix inpt2d
 *      (output_drcb rows x kernel_idrc cols), one row per output position;
 *   2. kern2d = output^T * inpt2d  (out_channel x kernel_idrc);
 *   3. transpose kern2d into kernel_ptr (kernel_idrc x out_channel).
 * Input layout implied by the indexing below: [batch][col][row][depth][channel].
 * Zero padding is implicit: patch slots whose source falls outside the input
 * keep the zeros written by calloc.
 * Returns Val_unit; results are written into the vKernel bigarray.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the input/output/kernel tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  /* Widen to size_t BEFORE multiplying: the element count can overflow int
   * for large tensors, which would silently under-allocate. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc((size_t) kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) {
    free(inpt2d);
    exit(1);
  }
  memset(kernel_ptr, 0, (size_t) kernel_cols * kernel_rdio * sizeof(TYPE));
  /* Effective ("up-sampled") kernel extents once dilation is applied. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* SAME-style padding derived from the output size, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* im2col expansion: rows of inpt2d are independent, safe to parallelize. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat index i into (batch bt, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    /* Top-left-front corner of the receptive field (may be negative). */
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            /* cnt advances even when the tap is padding, keeping patch
             * layout aligned with the kernel layout. */
            ++cnt;
          }
        }
      }
    }
  }
  /* kern2d = output^T (out_channel x output_drcb) * inpt2d. */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);
  /* Transpose into the caller's (kernel_idrc x out_channel) layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode-runtime stub: unpacks the 21 boxed arguments and forwards to the
 * native implementation. argn is unused; the arity is fixed. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
/*
 * Input gradient of a dilated 3-D (cuboid) convolution via col2im:
 *   1. inpt2d = output * kernel^T  (output_drcb x kernel_idrc) — the
 *      per-output-position patch gradients;
 *   2. scatter-add each patch row back into input_ptr (col2im).
 * Input layout implied by the indexing below: [batch][col][row][depth][channel].
 * Returns Val_unit; results are written into the vInput bigarray.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the input/output tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* Widen to size_t BEFORE multiplying: the element count can overflow int
   * for large tensors, which would silently under-allocate. */
  TYPE *inpt2d = (TYPE *) calloc((size_t) kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Accumulation target must start at zero. */
  memset(input_ptr, 0, (size_t) batches * input_crdi * sizeof(TYPE));
  INIT;
  /* Effective ("up-sampled") kernel extents once dilation is applied. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* SAME-style padding derived from the output size, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d = output (output_drcb x out_channel) * kernel^T. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);
  /* col2im scatter-add. Kept serial on purpose: receptive fields of
   * different output positions can overlap, so concurrent iterations would
   * race on the same input_ptr element. */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat index i into (batch bt, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            /* Gradients of padding taps are discarded, but cnt still
             * advances to stay aligned with the patch layout. */
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-runtime stub: unpacks the 21 boxed arguments and forwards to the
 * native implementation. argn is unused; the arity is fixed. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
#endif /* OWL_ENABLE_TEMPLATE */
|
valid.yolo2.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_64_272_272_32_3_3.h"
#include "gen_ukr_A4B2gemm_1_64_272_272_32_3_3.h"
// Driver for one auto-generated ("push button") convolution layer kernel.
// The geometry is baked in as literals: 272x272 spatial extent, 3x3 kernel,
// 32 input channels, 64 output filters (see the uN* macros from the included
// gen_ukr_* headers) -- presumably one YOLOv2 layer, per the file name;
// TODO confirm against the generator.
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
// Mirrors of the layer geometry; not referenced below (constants are inlined).
int Nx = 272;
int Ny = 272;
int Nh = 3;
// Per-lane row offsets handed to the scatter micro-kernels; temporarily
// bumped by 2 below when a 6-pixel strip wraps to the next image row.
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
// Weight repack: transpose oriB into the 16-filter interleaved layout B that
// the micro-kernels consume -- two 8x8 AVX transposes per 16-filter pack.
// The (tid%1)/(tid/1) factors are the generator's (degenerate, single-thread)
// work partitioning.
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
// Generated loop nest. The *5 and *4 levels each run a single iteration
// (bounds equal their step), so the real blocking happens at the Tc1/Tf2/Txy3
// levels; the innermost tile is 6 output pixels x 16 filters.
for(int c5=0;c5<32+0;c5+=32)
{
for(int xy5=0;xy5<73984+0;xy5+=73984)
{
for(int f5=0;f5<64+0;f5+=64)
{
for(int c4=c5;c4<min(32, 32+c5);c4+=32)
{
for(int xy4=xy5;xy4<min(73984, 73984+xy5);xy4+=73984)
{
for(int f4=f5;f4<min(64, 64+f5);f4+=64)
{
for(int c3=c4;c3<min(32, 32+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(64, 64+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(73984, 73984+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(73984, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(64, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(32, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(32, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(73984, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(64, 16+f2);f1+=16)
{
// Remaining channel depth for this tile, and the (x, y) pixel coordinates
// recovered from the flat spatial index xy1 (73984 = 272 * 272).
int ctile=min(Tc1, 32-c1);
int x1=xy1/272;
int y1=xy1%272/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
// Flat offsets into A (input, padded row stride 274 = 272 + 2),
// B (packed weights) and C (output, row stride 272).
int offsetA=0+b1*2402432+c1_1*75076+1*x1*274+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*4608+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*4734976+of1_1*73984+x1*272+y1*1+of1_2*1;
// Fast path: the 6-pixel strip fits inside the current image row.
if(272-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
// Strip wraps to the next row: shift the trailing lane offsets by 2 to skip
// the inter-row halo of A (row stride 274 vs width 272), run, then restore.
else if(272*272-xy1>=6){
for(int sti=272-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=272-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
// Tail: fewer than 6 pixels remain in the image; use the 4-wide variant.
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
residualbased_block_builder_and_solver_with_constraints_elementwise.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Aditya Ghantasala
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER_WITH_CONSTRAINTS_ELEMENTWISE)
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER_WITH_CONSTRAINTS_ELEMENTWISE
/* System includes */
#include <unordered_set>
#include <unordered_map>
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "includes/master_slave_constraint.h"
#include "utilities/helper_classes_for_constraint_builder.h"
#include "includes/key_hash.h"
#include "containers/pointer_vector_map.h"
#include "containers/pointer_hash_map_set.h"
#include "containers/data_value_container.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @author Aditya Ghantasala
*/
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise
: public ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise);
// Base builder-and-solver whose types are re-exported below.
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
// Global (assembled) system matrix/vector types.
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
// Element/condition-local contribution types.
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodeType NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
// Master-slave constraint machinery (see helper_classes_for_constraint_builder.h).
typedef MasterSlaveConstraint MasterSlaveConstraintType;
typedef typename MasterSlaveConstraint::Pointer MasterSlaveConstraintPointerType;
typedef Internals::AuxiliaryGlobalMasterSlaveConstraint AuxiliaryGlobalMasterSlaveConstraintType;
typedef Internals::GlobalMasterSlaveRelationContainerType GlobalMasterSlaveRelationContainerType;
typedef std::vector<IndexType> EquationIdVectorType;
typedef std::vector<IndexType> VectorIndexType;
typedef std::vector<Dof<double>::Pointer> DofsVectorType;
typedef Vector VectorType;
// Applies the master-slave elimination to local contributions during build.
typedef Internals::ConstraintImposer<TSparseSpace, TDenseSpace, TLinearSolver> ConstraintImposerType;
///@}
///@name Life Cycle
///@{
/**
 * @brief Constructor with settings.
 * @param pNewLinearSystemSolver The linear solver used for the system
 * @param ThisParameters Settings object; currently no options are supported,
 *        so it is validated against an empty default set (unknown keys throw).
 */
explicit ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
    // Validate default parameters
    Parameters default_parameters = Parameters(R"(
{
})" );
    ThisParameters.ValidateAndAssignDefaults(default_parameters);
}
/**
 * @brief Constructor taking only the linear solver (no settings).
 * @param pNewLinearSystemSolver The linear solver used for the system
 */
explicit ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver)
{
}
/** Destructor. No resources owned beyond what the base class manages.
 */
~ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Performs the base-class system setup and, whenever the model part
 * carries master-slave constraints, formulates the global relation map
 * used later by the constraint-aware build.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
    ModelPart &rModelPart) override
{
    BaseType::SetUpSystem(rModelPart);
    const bool has_constraints = rModelPart.NumberOfMasterSlaveConstraints() > 0;
    if (has_constraints) {
        FormulateGlobalMasterSlaveRelations(rModelPart);
    }
}
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &b) override
{
    // No global master-slave relations formulated: plain base-class build.
    if (mGlobalMasterSlaveConstraints.size() == 0) {
        BaseType::Build(pScheme, rModelPart, A, b);
        return;
    }
    // Otherwise apply the element-wise constraint elimination while assembling.
    BuildWithConstraints(pScheme, rModelPart, A, b);
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    // No global master-slave relations formulated: defer to the base class.
    if (mGlobalMasterSlaveConstraints.size() == 0) {
        BaseType::BuildAndSolve(pScheme, rModelPart, A, Dx, b);
        return;
    }
    // Otherwise run the constraint-aware build + solve + slave reconstruction.
    BuildAndSolveWithConstraints(pScheme, rModelPart, A, Dx, b);
}
/**
 * @brief Base-class step initialization plus InitializeSolutionStep() on
 * every master-slave constraint of the model part (in parallel).
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 */
void InitializeSolutionStep(
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY
    BaseType::InitializeSolutionStep(rModelPart, A, Dx, b);
    // Getting process info
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Computing constraints
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
    // Constraints are independent of each other, hence the parallel loop.
    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int k = 0; k < n_constraints; k++)
    {
        auto it = constraints_begin + k;
        it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation ids.
    }
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise failed to initialize solution step.")
}
/**
 * @brief Base-class step finalization plus FinalizeSolutionStep() on every
 * master-slave constraint of the model part (in parallel).
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 */
void FinalizeSolutionStep(
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY
    BaseType::FinalizeSolutionStep(rModelPart, A, Dx, b);
    // Getting process info
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Computing constraints
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
    // Constraints are independent of each other, hence the parallel loop.
    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int k = 0; k < n_constraints; k++)
    {
        auto it = constraints_begin + k;
        it->FinalizeSolutionStep(r_process_info);
    }
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise failed to finalize solution step.")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
// Short class tag used by PrintInfo/PrintData and log messages.
std::string Info() const override
{
    return "ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise";
}
/// Print information about this object.
/// Print information about this object.
// Prints only the class tag from Info().
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data.
/// Print object's data.
// No internal data is reported; prints the same tag as PrintInfo().
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Builds the sparsity pattern of A, using the constraint-aware
 * variant only when global master-slave relations were formulated.
 * @param pScheme The integration scheme considered
 * @param A The system matrix to allocate
 * @param rModelPart The model part of the problem to solve
 */
void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType &A,
    ModelPart &rModelPart) override
{
    if (mGlobalMasterSlaveConstraints.size() == 0) {
        BaseType::ConstructMatrixStructure(pScheme, A, rModelPart);
        return;
    }
    ConstructMatrixStructureWithConstraints(pScheme, A, rModelPart);
}
/**
 * @brief Same as the base-class ConstructMatrixStructure() except that, after
 * each element/condition computes its equation ids, ApplyConstraints() maps
 * slave ids to their masters before the sparsity pattern is recorded; the
 * constraint equation ids themselves are inserted as well.
 * @param pScheme The integration scheme considered
 * @param A The system matrix whose CSR structure is allocated and zero-filled
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructureWithConstraints(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");
    // To Impose constraints
    ConstraintImposerType constraint_imposer(mGlobalMasterSlaveConstraints);
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
    ModelPart::MasterSlaveConstraintContainerType::iterator constraints_begin = rModelPart.MasterSlaveConstraints().begin();
    const std::size_t equation_size = BaseType::mEquationSystemSize;
    // One lock per matrix row: rows are filled concurrently below.
    std::vector< LockObject > lock_array(equation_size);
    // indices[row] collects the column ids coupled to that row.
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);
    #pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40); // heuristic per-row nonzero estimate
    }
    Element::EquationIdVectorType ids(3, 0); // initial size only; EquationId() resizes
    #pragma omp parallel for firstprivate(nelements, ids, constraint_imposer)
    for (int iii=0; iii<nelements; iii++) {
        typename ElementsContainerType::iterator i_element = el_begin + iii;
        pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
        // Replace slave equation ids by the corresponding master ids.
        constraint_imposer.template ApplyConstraints<Element>(*i_element, ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    #pragma omp parallel for firstprivate(nconditions, ids, constraint_imposer)
    for (int iii = 0; iii<nconditions; iii++) {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
        constraint_imposer.template ApplyConstraints<Condition>(*i_condition, ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    // Reserve entries for the constraint equations themselves (by convention
    // slave ids land in `ids`, master ids in `aux_ids` -- confirm against
    // MasterSlaveConstraint::EquationIdVector).
    Element::EquationIdVectorType aux_ids(3, 0);
    const int nconstraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    #pragma omp parallel for firstprivate(nconstraints, ids, aux_ids)
    for (int iii = 0; iii<nconstraints; iii++) {
        ModelPart::MasterSlaveConstraintContainerType::iterator i_constraint = constraints_begin + iii;
        i_constraint->EquationIdVector(ids, aux_ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
        for (std::size_t i = 0; i < aux_ids.size(); i++) {
            lock_array[aux_ids[i]].SetLock();
            auto& row_indices = indices[aux_ids[i]];
            row_indices.insert(aux_ids.begin(), aux_ids.end());
            lock_array[aux_ids[i]].UnSetLock();
        }
    }
    //destroy locks
    lock_array = std::vector< LockObject >();
    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++) {
        nnz += indices[i].size();
    }
    A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();
    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    // (each entry is a prefix sum of the previous one)
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
    }
    // Fill the column ids per row (rows are independent here) and sort them,
    // as required by the CSR format.
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }
    A.set_filled(indices.size()+1, nnz);
    Timer::Stop("MatrixStructure");
}
/**
 * @brief Constraint-aware counterpart of BuildAndSolve(): refreshes the
 * constraint relations, builds, applies Dirichlet BCs, solves, and finally
 * reconstructs the slave dof values from the master solution.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolveWithConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b)
{
    KRATOS_TRY
    // Refresh the T/C data of the global master-slave relations for this build.
    const double start_update_constraints = OpenMPUtils::GetCurrentTime();
    this->UpdateConstraintsForBuilding(rModelPart);
    const double stop_update_constraints = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Constraints update time : " << stop_update_constraints - start_update_constraints << std::endl;
    Timer::Start("Build");
    // Dispatches to BuildWithConstraints() since constraints are present.
    Build(pScheme, rModelPart, A, b);
    Timer::Stop("Build");
    this->ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
        << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    const double start_solve = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");
    this->SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");
    const double stop_solve = OpenMPUtils::GetCurrentTime();
    // The solve produced only master values; recover the slave dofs from them.
    const double start_reconstruct_slaves = OpenMPUtils::GetCurrentTime();
    ReconstructSlaveSolutionAfterSolve(rModelPart, A, Dx, b);
    const double stop_reconstruct_slaves = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Reconstruct slaves time: " << stop_reconstruct_slaves - start_reconstruct_slaves << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() == 3)) << "After the solution of the system"
        << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief Same as the base-class Build() except that ApplyConstraints() is
 * called on each local LHS/RHS contribution (eliminating slave dofs in favor
 * of their masters) before it is assembled into the global system.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param b The RHS vector
 */
void BuildWithConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &b)
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    ConstraintImposerType constraint_imposer(mGlobalMasterSlaveConstraints);
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
    const ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    const ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
    //contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;
    // assemble all elements
    double start_build = OpenMPUtils::GetCurrentTime();
    // Each thread carries its own local-contribution buffers (firstprivate);
    // `nowait` lets threads flow from the element loop into the condition loop.
    #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId, constraint_imposer)
    {
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; k++)
        {
            ModelPart::ElementsContainerType::iterator it = el_begin + k;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                element_is_active = (it)->Is(ACTIVE);
            if (element_is_active)
            {
                //calculate elemental contribution
                pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                // Eliminate slave dofs from the local system before assembly.
                constraint_imposer.template ApplyConstraints<Element>(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, BaseType::mlock_array);
#else
                this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                // clean local elemental memory
                pScheme->CleanMemory(*(it.base()));
            }
        }
        //#pragma omp parallel for firstprivate(nconditions, LHS_Contribution, RHS_Contribution, EquationId ) schedule(dynamic, 1024)
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; k++)
        {
            ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool condition_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                condition_is_active = (it)->Is(ACTIVE);
            if (condition_is_active)
            {
                //calculate elemental contribution
                pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                constraint_imposer.template ApplyConstraints<Condition>(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, BaseType::mlock_array);
#else
                this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                // clean local elemental memory
                pScheme->CleanMemory(*(it.base()));
            }
        }
    }
    const double stop_build = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;
    //for (int i = 0; i < A_size; i++)
    //    omp_destroy_lock(&lock_array[i]);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList, AuxiliarDofList;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// Unordered set of dof pointers used to deduplicate dofs coming from elements,
// conditions and constraints before building the sorted dof array.
typedef std::unordered_set < typename NodeType::DofType::Pointer, DofPointerHasher> set_type;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
set_type dof_global_set;
// Heuristic pre-sizing (~20 dofs per element) to limit rehashing during insertion.
dof_global_set.reserve(nelements*20);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
// Each thread fills its own dofs_tmp_set and merges into dof_global_set under a
// critical section at the end. ElementalDofList/AuxiliarDofList are firstprivate
// scratch buffers (one copy per thread, repopulated on every iteration).
#pragma omp parallel firstprivate(nelements, ElementalDofList, AuxiliarDofList)
{
set_type dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// nowait: threads may proceed to the condition loop without synchronizing here.
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < nelements; i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_tmp_set.insert(ElementalDofList.begin(), ElementalDofList.end());
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "Initializing condition loop" << std::endl;
ConditionsArrayType& pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
// gets list of Dof involved on every condition
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_tmp_set.insert(ElementalDofList.begin(), ElementalDofList.end());
}
auto& pConstraints = rModelPart.MasterSlaveConstraints();
const int nconstraints = static_cast<int>(pConstraints.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < nconstraints; i++)
{
auto it = pConstraints.begin() + i;
// gets both slave (ElementalDofList) and master (AuxiliarDofList) dofs of the constraint
it->GetDofList(ElementalDofList, AuxiliarDofList, CurrentProcessInfo);
dofs_tmp_set.insert(ElementalDofList.begin(), ElementalDofList.end());
dofs_tmp_set.insert(AuxiliarDofList.begin(), AuxiliarDofList.end());
}
// Merge the per-thread set into the global one; serialized to avoid races.
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
// Copy the unique dofs into a PointerVectorSet and sort it, then publish as mDofSet.
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dof_global_set.size());
for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
{
Doftemp.push_back( it->get() );
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
#ifdef KRATOS_DEBUG
// If reactions are to be calculated, we check if all the dofs have reactions defined.
// This is to be done only in debug mode.
if (BaseType::GetCalculateReactionsFlag()) {
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// This is the set of condensed global constraints.
GlobalMasterSlaveRelationContainerType mGlobalMasterSlaveConstraints; //This can be changed to more efficient implementation later on.
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief this method condenses the MasterSlaveConstraints which are added on the rModelPart
* into objects of AuxilaryGlobalMasterSlaveRelation. One unique object for each unique slave.
* these will be used in the ApplyConstraints functions later on.
* @param rModelPart The model part of the problem to solve
*/
void FormulateGlobalMasterSlaveRelations(ModelPart& rModelPart)
{
KRATOS_TRY
const double start_formulate = OpenMPUtils::GetCurrentTime();
// First delete the existing ones
mGlobalMasterSlaveConstraints.clear();
// Getting the array of the conditions
const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
// Getting the beginning iterator
const ModelPart::MasterSlaveConstraintContainerType::iterator constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();
// NOTE(review): AssembleConstraint mutates the shared mGlobalMasterSlaveConstraints map
// and is therefore wrapped in an omp critical below. As a consequence only the
// ACTIVE-flag checks actually run in parallel; the assembly itself is serialized.
#pragma omp parallel for schedule(guided, 512)
for (int i_constraints = 0; i_constraints < number_of_constraints; i_constraints++)
{
ModelPart::MasterSlaveConstraintContainerType::iterator it = constraints_begin;
std::advance(it, i_constraints);
//detect if the constraint is active or not. If the user did not make any choice the constraint
//is active by default
bool constraint_is_active = true;
if ((it)->IsDefined(ACTIVE))
constraint_is_active = (it)->Is(ACTIVE);
if (constraint_is_active)
{
//assemble the Constraint contribution (serialized, see note above)
#pragma omp critical
AssembleConstraint(*it, r_current_process_info);
}
}
const double stop_formulate = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Formulate global constraints time: " << stop_formulate - start_formulate << std::endl;
KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::FormulateGlobalMasterSlaveRelations failed ..");
}
/**
 * @brief this method assembles the given master slave constraint to the auxiliary global master slave constraints.
 * For each slave equation id a global relation object is created (if not yet present) and the
 * masters' weights from the constraint's relation matrix are accumulated onto it.
 * @param rMasterSlaveConstraint object of the master slave constraint to be assembled.
 * @param rCurrentProcessInfo current process info.
 * @note Not thread-safe on its own: it mutates mGlobalMasterSlaveConstraints. Callers
 * serialize access (see the omp critical in FormulateGlobalMasterSlaveRelations).
 */
void AssembleConstraint(ModelPart::MasterSlaveConstraintType& rMasterSlaveConstraint, ProcessInfo& rCurrentProcessInfo)
{
    KRATOS_TRY
    LocalSystemMatrixType relation_matrix(0,0);
    LocalSystemVectorType constant_vector(0);
    EquationIdVectorType slave_equation_ids(0);
    EquationIdVectorType master_equation_ids(0);
    //get the equation Ids of the constraint
    rMasterSlaveConstraint.EquationIdVector(slave_equation_ids, master_equation_ids, rCurrentProcessInfo);
    //calculate constraint's T and b matrices
    rMasterSlaveConstraint.CalculateLocalSystem(relation_matrix, constant_vector, rCurrentProcessInfo);
    int slave_count = 0;
    for (auto slave_equation_id : slave_equation_ids)
    {
        // Single map lookup instead of the previous find + operator[] + find sequence:
        // emplace returns the existing entry, or inserts an empty slot we then fill.
        auto emplace_result = mGlobalMasterSlaveConstraints.emplace(slave_equation_id, nullptr);
        if (emplace_result.second)
            emplace_result.first->second = Kratos::make_unique<AuxiliaryGlobalMasterSlaveConstraintType>(slave_equation_id);
        int master_count = 0;
        for (auto master_equation_id : master_equation_ids)
        {
            // relation_matrix row = slave index, column = master index.
            emplace_result.first->second->AddMaster(master_equation_id, relation_matrix(slave_count, master_count));
            master_count++;
        }
        slave_count++;
    }
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::AssembleSlaves failed ...");
}
/**
* @brief this method resets the LHS and RHS values of the AuxilaryGlobalMasterSlaveRelation objects
*/
void ResetConstraintRelations()
{
KRATOS_TRY
const int number_of_constraints = static_cast<int>(mGlobalMasterSlaveConstraints.size());
// Getting the beginning iterator
const GlobalMasterSlaveRelationContainerType::iterator constraints_begin = mGlobalMasterSlaveConstraints.begin();
#pragma omp parallel for schedule(guided, 512)
for (int i_constraints = 0; i_constraints < number_of_constraints; ++i_constraints)
{
//GlobalMasterSlaveRelationContainerType::iterator it = constraints_begin + i_constraints;
GlobalMasterSlaveRelationContainerType::iterator it = constraints_begin;
std::advance(it, i_constraints);
(it->second)->Reset();
}
KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ResetConstraintRelations failed to reset constraint relations..");
}
/**
* @brief this method uses the MasterSlaveConstraints objects in rModelPart to reconstruct the LHS and RHS values
* of the AuxilaryGlobalMasterSlaveRelation objects. That is the value of Slave as LHS and the T*M+C as RHS value
* @param rModelPart The model part of the problem to solve
*/
void UpdateConstraintsForBuilding(ModelPart& rModelPart)
{
KRATOS_TRY
// Reset the constraint equations
ResetConstraintRelations();
// Getting the array of the conditions
const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
// Getting the beginning iterator
const ModelPart::MasterSlaveConstraintContainerType::iterator constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();
// NOTE(review): unlike FormulateGlobalMasterSlaveRelations there is no critical
// section around the update call below. This is race-free only if no two
// constraints write to the same slave entry concurrently — confirm that slave
// dofs are unique across constraints.
#pragma omp parallel for schedule(guided, 512)
for (int i_constraints = 0; i_constraints < number_of_constraints; i_constraints++)
{
ModelPart::MasterSlaveConstraintContainerType::iterator it = constraints_begin;
std::advance(it, i_constraints);
//detect if the constraint is active or not. If the user did not make any choice the constraint
//is active by default
bool constraint_is_active = true;
if ((it)->IsDefined(ACTIVE))
constraint_is_active = (it)->Is(ACTIVE);
if (constraint_is_active)
{
UpdateMasterSlaveConstraint(*it, r_current_process_info);
}
}
KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::UpdateConstraintsForBuilding failed ..");
}
/**
* @brief this method uses the MasterSlaveConstraints objects in rModelPart to reconstruct the LHS and RHS values
* of the individual AuxilaryGlobalMasterSlaveRelation object. That is the value of Slave as LHS and the T*M+C as RHS value
* @param rMasterSlaveConstraint The MasterSlaveConstraint which is to be updated
*/
void UpdateMasterSlaveConstraint(ModelPart::MasterSlaveConstraintType& rMasterSlaveConstraint, ProcessInfo& rCurrentProcessInfo)
{
KRATOS_TRY
//contributions to the system
LocalSystemMatrixType relation_matrix(0,0);
LocalSystemVectorType constant_vector(0);
EquationIdVectorType slave_equation_ids(0);
EquationIdVectorType master_equation_ids(0);
//get the equation Ids of the constraint
rMasterSlaveConstraint.EquationIdVector(slave_equation_ids, master_equation_ids, rCurrentProcessInfo);
//calculate constraint's T and b matrices
rMasterSlaveConstraint.CalculateLocalSystem(relation_matrix, constant_vector, rCurrentProcessInfo);
// For calculating the constant
MasterSlaveConstraintType::DofPointerVectorType slave_dofs_vector;
MasterSlaveConstraintType::DofPointerVectorType master_dofs_vector;
rMasterSlaveConstraint.GetDofList(slave_dofs_vector, master_dofs_vector, rCurrentProcessInfo);
int slave_index = 0;
for (auto &slave_dof : slave_dofs_vector)
{
// slave_value = sum_j T(slave, j) * master_value_j + b(slave)
double slave_value_calc = 0.0;
for (IndexType master_index = 0; master_index < master_dofs_vector.size(); master_index++)
{
slave_value_calc += master_dofs_vector[master_index]->GetSolutionStepValue() * relation_matrix(slave_index, master_index);
}
slave_value_calc += constant_vector[slave_index];
// NOTE(review): find() is dereferenced without checking against end(); this
// assumes FormulateGlobalMasterSlaveRelations already created an entry for
// every slave dof of this constraint — confirm.
auto global_constraint = mGlobalMasterSlaveConstraints.find(slave_dof->EquationId());
global_constraint->second->SetLeftHandSide( slave_dof->GetSolutionStepValue() );
global_constraint->second->UpdateRightHandSide(slave_value_calc);
slave_index++;
}
KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::UpdateMasterSlaveConstraint failed ..");
}
/**
* @brief This method reconstructs the slave solution after Solving.
* @param rModelPart Reference to the ModelPart containing the problem.
* @param A System matrix
* @param Dx Vector of results (variations on nodal variables)
* @param b RHS vector (residual)
*/
void ReconstructSlaveSolutionAfterSolve(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb)
{
KRATOS_TRY
const int number_of_constraints = static_cast<int>(mGlobalMasterSlaveConstraints.size());
// Getting the beginning iterator
const GlobalMasterSlaveRelationContainerType::iterator constraints_begin = mGlobalMasterSlaveConstraints.begin();
//contributions to the system
// Per-thread scratch buffers (made firstprivate below) for the per-constraint data.
VectorType master_weights_vector;
double constant = 0.0;
IndexType slave_equation_id = 0;
EquationIdVectorType master_equation_ids = EquationIdVectorType(0);
#pragma omp parallel for schedule(guided, 512) firstprivate(slave_equation_id, master_equation_ids, master_weights_vector, constant)
for (int i_constraints = 0; i_constraints < number_of_constraints; i_constraints++)
{
// Node-based container: advance from begin() to the i-th entry.
GlobalMasterSlaveRelationContainerType::iterator it = constraints_begin;
std::advance(it, i_constraints);
double slave_dx_value = 0.0;
//get the equation Ids of the constraint
(it->second)->EquationIdsVector(slave_equation_id, master_equation_ids);
//calculate constraint's T and b matrices
(it->second)->CalculateLocalSystem(master_weights_vector, constant);
// Reconstruct the slave update as the weighted sum of its masters' updates plus the constant.
int master_index = 0;
for (auto &master_equation_id : master_equation_ids)
{
slave_dx_value += TSparseSpace::GetValue(rDx, master_equation_id) * master_weights_vector(master_index);
master_index++;
}
slave_dx_value += constant;
rDx[slave_equation_id] = slave_dx_value; // this access is always unique for an object so no need of special care for openmp
}
KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ReconstructSlaveSolutionAfterSolve failed ..");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
906223_gcc_so4.c | #define _POSIX_C_SOURCE 200809L
/* Wall-clock timing helpers: START_TIMER(S) declares start/end timevals for
 * section S and records the start time; STOP_TIMER(S, T) records the end time
 * and accumulates the elapsed seconds into the profiler field T->S. */
#define START_TIMER(S) \
struct timeval start_##S, end_##S; \
gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
gettimeofday(&end_##S, NULL); \
T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Host-provided description of a multi-dimensional data buffer (Devito-style).
 * Only `data` and `size` are used in this file; the remaining fields are
 * presumably padded/domain/halo sizes and offsets -- not referenced here. */
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
/* Per-section accumulated runtimes in seconds, filled via STOP_TIMER. */
struct profiler
{
double section0;
double section1;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);
/* Driver for the time-blocked wave-propagation kernel. Iterates over temporal
 * blocks and skewed spatial tiles (xb, yb) and calls bf0 for each wavefront
 * step. Returns 0 on success. Generated code (Devito); structure is kept as-is. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
/* Reinterpret the flat buffers as variably-modified array types.
 * NOTE(review): only block_sizes is used below; the casts of
 * nnz_sp_source_mask, save_src_u, source_id, sp_source_id and u are dead
 * locals here (the corresponding *_vec pointers are forwarded to bf0). */
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
/* Tile and cache-block extents come in via the block_sizes array:
 * [0]=x tile, [1]=y tile, [2]=x block, [3]=y block. */
int xb_size = block_sizes[0];
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
//for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
//{
/* sf is the time-skewing factor; spatial loop bounds below are extended by
 * sf * (time_M - time_m) because the tiles are skewed along time. */
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
START_TIMER(section0)
for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
{
for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
{
/* t0/t1/t2 rotate through the 3 time buffers of u; tw is the wrapped
 * logical time index used to address the saved source array. */
for (int time = t_blk, t0 = (time + 2) % (3), t1 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_id_vec, save_src_u_vec, source_id_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
//bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
}
}
}
/* End section0 */
}
STOP_TIMER(section0, timers)
/* NOTE(review): section1 has an empty body; this loop only measures timer
 * overhead. Presumably a placeholder left by the code generator. */
for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
{
START_TIMER(section1)
/* Begin section1 */
/* End section1 */
STOP_TIMER(section1, timers)
}
return 0;
}
/* Computes one skewed time step of the wave field u over one (xb, yb) space
 * tile, cache-blocked by (x0_blk0_size, y0_blk0_size) and parallelized over
 * blocks with OpenMP. Spatial indices are de-skewed via (x - time, y - time);
 * t0/t1/t2 select the previous/current/next of the 3 rotating time buffers. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;
/* Degenerate block sizes would make the loops below malformed; bail out early. */
if (x0_blk0_size == 0 || y0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
/* Each (x0_blk0, y0_blk0) cache block is an independent unit of work. */
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
/* Second-order-in-time update with a +/-2-point spatial star stencil
 * and damping; the +4 (+1 for damp) offsets account for array halos. */
#pragma omp simd aligned(damp, u, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r8 = 1.0 / dt;
float r7 = 1.0 / (dt * dt);
float r6 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
u[t2][x - time + 4][y - time + 4][z + 4] = (r6 * (-r7 * (u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F * u[t1][x - time + 4][y - time + 4][z + 4])) + r8 * (damp[x - time + 1][y - time + 1][z + 1] * u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F * (u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F * (u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F * u[t1][x - time + 4][y - time + 4][z + 4]) / (r6 * r7 + r8 * damp[x - time + 1][y - time + 1][z + 1]);
}
/* Inject the pre-saved source contributions at this column's sparse
 * source locations (nnz_sp_source_mask gives the count per column). */
#pragma omp simd aligned(damp, u, vp : 32)
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_id[x - time][y - time][sp_zi];
float r0 = save_src_u[tw][source_id[x - time][y - time][zind]];
// * source_mask[x - time][y - time][zind];
u[t2][x - time + 4][y - time + 4][zind + 4] += r0;
}
}
}
}
}
}
}
|
3loops.c | /*
* Only the first level loop index variable should be private
* */
#include <stdio.h>
#if defined (_OPENMP)
#include <omp.h>
#endif
int main(void)
{
    int i,jj,kkk;
    double a[10][9][8];
    /*
     * Only the index variable of the loop the "parallel for" binds to (i) is
     * implicitly private. jj and kkk are otherwise shared across threads,
     * which is a data race on the inner loop counters; list them in a
     * private clause so each thread iterates with its own copies.
     */
    #pragma omp parallel for private(jj,kkk)
    for(i=0;i<10;i++){
        for(jj=0;jj<9;jj++){
            for (kkk=0;kkk<8;kkk++){
                a[i][jj][kkk]=9.9;
                // printf("a[%d][%d][%d]=%f ",i,jj,kkk,a[i][jj][kkk]);
            }
        }
    }
    return 0;
}
/* Inner loops are not affected by the omp directive, so their index variables
 * remain shared unless they are listed in a private clause, declared inside the
 * loop, or brought under the directive with collapse(n) or ordered(n).
 */
|
omp_section_private.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Checks that private(sum0,i) gives every section its own scratch copies:
 * three sections sum the disjoint ranges 1..399, 400..699 and 700..999 into a
 * shared accumulator guarded by critical sections, and the result is compared
 * against the closed-form total. Returns 1 on success, 0 on failure. */
int test_omp_section_private()
{
  int total;    /* shared accumulator, seeded with 7 */
  int partial;  /* per-section scratch sum (private) */
  int idx;      /* per-section loop counter (private) */
  int expected;
  total = 7;
  partial = 0;
  #pragma omp parallel
  {
    #pragma omp sections private(partial,idx)
    {
      #pragma omp section
      {
        partial = 0;
        for (idx = 1; idx < 400; idx++)
          partial = partial + idx;
        #pragma omp critical
        {
          total = total + partial;
        }
      }
      #pragma omp section
      {
        partial = 0;
        for (idx = 400; idx < 700; idx++)
          partial = partial + idx;
        #pragma omp critical
        {
          total = total + partial;
        }
      }
      #pragma omp section
      {
        partial = 0;
        for (idx = 700; idx < 1000; idx++)
          partial = partial + idx;
        #pragma omp critical
        {
          total = total + partial;
        }
      }
    } /* end of sections */
  } /* end of parallel */
  expected = (999 * 1000) / 2 + 7;
  return (expected == total);
}
/* Runs the private-sections test REPETITIONS times; the exit code is the
 * number of failed repetitions (0 means all passed). */
int main()
{
  int rep;
  int failures = 0;
  for (rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_section_private())
      failures++;
  }
  return failures;
}
|
LinkedCells.h | /**
* @file LinkedCells.h
*
* @author tchipevn
* @date 17.02.2018
*/
#pragma once
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CellBasedParticleContainer.h"
#include "autopas/containers/CellBlock3D.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/LoadEstimators.h"
#include "autopas/containers/cellPairTraversals/BalancedTraversal.h"
#include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/iterators/RegionParticleIterator.h"
#include "autopas/options/DataLayoutOption.h"
#include "autopas/options/LoadEstimatorOption.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/ParticleCellHelpers.h"
#include "autopas/utils/StringUtils.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopas/utils/inBox.h"
namespace autopas {
/**
* LinkedCells class.
* This class uses a list of neighboring cells to store the particles.
* These cells dimensions are at least as large as the given cutoff radius,
* therefore short-range interactions only need to be calculated between
* particles in neighboring cells.
* @tparam Particle type of the Particle
*/
template <class Particle>
class LinkedCells : public CellBasedParticleContainer<FullParticleCell<Particle>> {
public:
/**
* Type of the ParticleCell.
*/
using ParticleCell = FullParticleCell<Particle>;
/**
* Type of the Particle.
*/
using ParticleType = typename ParticleCell::ParticleType;
/**
* Constructor of the LinkedCells class
* @param boxMin
* @param boxMax
* @param cutoff
* @param skin
* @param cellSizeFactor cell size factor relative to cutoff
* @param loadEstimator the load estimation algorithm for balanced traversals.
* By default all applicable traversals are allowed.
*/
LinkedCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff,
const double skin, const double cellSizeFactor = 1.0,
LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell)
// The cell block is built over an interaction length of cutoff + skin, so cells
// stay valid while particles move by up to the skin between rebuilds.
: CellBasedParticleContainer<ParticleCell>(boxMin, boxMax, cutoff, skin),
_cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor),
_loadEstimator(loadEstimator) {}
/**
* @copydoc ParticleContainerInterface::getContainerType()
*/
[[nodiscard]] ContainerOption getContainerType() const override {
  // This container implements the classic linked-cells scheme.
  return ContainerOption::linkedCells;
}
/**
* @copydoc ParticleContainerInterface::getParticleCellTypeEnum()
*/
[[nodiscard]] CellType getParticleCellTypeEnum() override {
  // Cells store their particles directly as full particle objects.
  return CellType::FullParticleCell;
}
/**
* @copydoc ParticleContainerInterface::addParticleImpl()
*/
void addParticleImpl(const ParticleType &p) override {
  // Insert the particle into whichever cell geometrically contains its position.
  _cellBlock.getContainingCell(p.getR()).addParticle(p);
}
/**
* @copydoc ParticleContainerInterface::addHaloParticleImpl()
*/
void addHaloParticleImpl(const ParticleType &haloParticle) override {
ParticleType pCopy = haloParticle;
pCopy.setOwnershipState(OwnershipState::halo);
ParticleCell &cell = _cellBlock.getContainingCell(pCopy.getR());
cell.addParticle(pCopy);
}
/**
* @copydoc ParticleContainerInterface::updateHaloParticle()
*/
bool updateHaloParticle(const ParticleType &haloParticle) override {
ParticleType pCopy = haloParticle;
pCopy.setOwnershipState(OwnershipState::halo);
auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin());
for (auto cellptr : cells) {
bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy);
if (updated) {
return true;
}
}
AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString());
return false;
}
void deleteHaloParticles() override {
  // Halo particles live exclusively in halo cells, so clearing those removes them all.
  _cellBlock.clearHaloCells();
}
void rebuildNeighborLists(TraversalInterface *traversal) override {
// nothing to do: linked cells derives neighbor relations directly from the cell
// structure, so there are no explicit neighbor lists to rebuild.
}
/**
* Generates the load estimation function depending on _loadEstimator.
* @return load estimator function object.
*/
BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() {
  // Dispatch on the configured estimator option.
  if (this->_loadEstimator == LoadEstimatorOption::squaredParticlesPerCell) {
    return [&](const std::array<unsigned long, 3> &cellsPerDimension,
               const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) {
      return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner);
    };
  }
  // LoadEstimatorOption::none and any unknown option fall back to a uniform estimate.
  return [](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner,
            const std::array<unsigned long, 3> &upperCorner) { return 1; };
}
void iteratePairwise(TraversalInterface *traversal) override {
  // If this is a balanced traversal, hand it the configured load estimator.
  if (auto *balanced = dynamic_cast<BalancedTraversal *>(traversal)) {
    balanced->setLoadEstimator(getLoadEstimatorFunction());
  }
  // The traversal must be compatible with linked cells AND operate on cell pairs.
  auto *lcTraversal = dynamic_cast<LCTraversalInterface<ParticleCell> *>(traversal);
  auto *cellTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal);
  if (lcTraversal and cellTraversal) {
    cellTraversal->setCellsToTraverse(this->_cells);
  } else {
    autopas::utils::ExceptionHandler::exception(
        "Trying to use a traversal of wrong type in LinkedCells::iteratePairwise. TraversalID: {}",
        traversal->getTraversalType());
  }
  traversal->initTraversal();
  traversal->traverseParticlePairs();
  traversal->endTraversal();
}
// Re-sorts particles into their correct cells after they have moved, deletes
// dummy/halo particles, and returns the particles that left the owned domain.
[[nodiscard]] std::vector<ParticleType> updateContainer() override {
this->deleteHaloParticles();
std::vector<ParticleType> invalidParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif // AUTOPAS_OPENMP
{
// private for each thread!
std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp for
#endif // AUTOPAS_OPENMP
for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) {
// Delete dummy particles of each cell.
this->getCells()[cellId].deleteDummyParticles();
// Skip cells with no remaining particles.
if (not this->getCells()[cellId].isNotEmpty()) continue;
auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId);
for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) {
// Particles that moved outside their cell's bounding box are collected
// for re-insertion and removed from the cell.
if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) {
myInvalidParticles.push_back(*pIter);
internal::deleteParticle(pIter);
}
}
}
// implicit barrier here (end of omp for)
// the barrier is needed because iterators are not threadsafe w.r.t. addParticle()
// this loop is executed for every thread and thus parallel. Don't use #pragma omp for here!
for (auto &&p : myInvalidParticles) {
// Particles still inside the box are re-added to their correct cell;
// particles outside (left the domain) are reported to the caller.
if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) {
this->template addParticle<false>(p);
} else {
myInvalidNotOwnedParticles.push_back(p);
}
}
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
{
// merge private vectors to global one.
invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(),
myInvalidNotOwnedParticles.end());
}
}
return invalidParticles;
}
/**
* @copydoc ParticleContainerInterface::getTraversalSelectorInfo()
*/
[[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override {
  // Linked cells has no extra cell offset, hence the trailing 0.
  const auto cellsPerDim = this->getCellBlock().getCellsPerDimensionWithHalo();
  return TraversalSelectorInfo(cellsPerDim, this->getInteractionLength(), this->getCellBlock().getCellLength(), 0);
}
[[nodiscard]] ParticleIteratorWrapper<ParticleType, true> begin(
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
  // Wrap a raw iterator over all cells; the wrapper takes ownership of it.
  auto *rawIter =
      new internal::ParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, 0, &_cellBlock, behavior);
  return ParticleIteratorWrapper<ParticleType, true>(rawIter);
}
[[nodiscard]] ParticleIteratorWrapper<ParticleType, false> begin(
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
  // Const overload: wrap a non-modifying raw iterator; the wrapper owns it.
  auto *rawIter =
      new internal::ParticleIterator<ParticleType, ParticleCell, false>(&this->_cells, 0, &_cellBlock, behavior);
  return ParticleIteratorWrapper<ParticleType, false>(rawIter);
}
// Returns an iterator over all particles inside [lowerCorner, higherCorner].
[[nodiscard]] ParticleIteratorWrapper<ParticleType, true> getRegionIterator(
const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
// We increase the search region by skin, as particles can move over cell borders.
auto startIndex3D =
this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin()));
auto stopIndex3D =
this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin()));
// Number of cells in the axis-aligned cuboid spanned by the two 3D indices (inclusive).
size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) *
(stopIndex3D[2] - startIndex3D[2] + 1);
std::vector<size_t> cellsOfInterest(numCellsOfInterest);
int i = 0;
// Enumerate every 1D cell index inside the cuboid (z-major, then y, then x).
for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) {
for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) {
for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) {
cellsOfInterest[i++] =
utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo());
}
}
}
// The region iterator filters the listed cells against the exact (unpadded) corners.
return ParticleIteratorWrapper<ParticleType, true>(
new internal::RegionParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, lowerCorner, higherCorner,
cellsOfInterest, &_cellBlock, behavior));
}
[[nodiscard]] ParticleIteratorWrapper<ParticleType, false> getRegionIterator(
    const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
    IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
  // We increase the search region by skin, as particles can move over cell borders.
  auto startIndex3D =
      this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin()));
  auto stopIndex3D =
      this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin()));

  const size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) *
                                    (stopIndex3D[2] - startIndex3D[2] + 1);
  // reserve + push_back instead of value-initializing the whole vector and then
  // overwriting every slot; also avoids the signed `int i` index.
  std::vector<size_t> cellsOfInterest;
  cellsOfInterest.reserve(numCellsOfInterest);

  // Hoist the loop-invariant cells-per-dimension lookup out of the triple loop.
  const auto cellsPerDimWithHalo = this->_cellBlock.getCellsPerDimensionWithHalo();
  for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) {
    for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) {
      for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) {
        cellsOfInterest.push_back(utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, cellsPerDimWithHalo));
      }
    }
  }

  return ParticleIteratorWrapper<ParticleType, false>(
      new internal::RegionParticleIterator<ParticleType, ParticleCell, false>(
          &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior));
}
/**
 * Grants access to the cell block backing this container.
 * @note Not supposed to be used except by verlet lists.
 * @return Mutable reference to the cell block.
 */
internal::CellBlock3D<ParticleCell> &getCellBlock() {
  return _cellBlock;
}
/**
 * @copydoc getCellBlock()
 * @note const version
 */
const internal::CellBlock3D<ParticleCell> &getCellBlock() const {
  return _cellBlock;
}
/**
 * Returns a reference to the underlying cell storage of LinkedCells.
 * @return Mutable reference to the cell vector.
 */
std::vector<ParticleCell> &getCells() {
  return this->_cells;
}
protected:
/**
 * Object that manages the block of cells and the position <-> cell mapping
 * (used above for bounding boxes, 3D indices, and iterator construction).
 */
internal::CellBlock3D<ParticleCell> _cellBlock;
/**
 * Load estimation algorithm for balanced traversals.
 */
autopas::LoadEstimatorOption _loadEstimator;
// ThreeDimensionalCellHandler
};
} // namespace autopas |
convolution_1x1_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Quantize a float to int8 with round-to-nearest (ties away from zero) and
// symmetric saturation.
//
// @param v value to quantize
// @return v rounded and clamped to [-127, 127]; -128 is deliberately excluded
//         so the positive and negative quantization ranges are symmetric.
static inline signed char float2int8(float v)
{
    // roundf keeps the computation in single precision instead of promoting
    // the argument to double as round(double) would.
    int int32 = (int)roundf(v);
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}
#if __aarch64__
#if 1
#include "gemm_symm_int8.h"
// Repack the 1x1 int8 kernel (outch x inch) into the "A" operand layout
// expected by the symmetric int8 GEMM (see gemm_symm_int8.h), 1 byte/weight.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(outch, inch, (size_t)1u);

    const int8_t* src = _kernel;
    int8_t* dst = kernel_tm;
    // lda == inch: the raw kernel rows are densely packed.
    reorder_a((int8_t*)src, dst, outch, inch, inch);
}
// 1x1 stride-1 int8 convolution as a plain GEMM: C(m x n) = A(m x k) * B(k x n)
// with m = output channels, k = input channels, n = spatial size. Output is
// int32 (no requantization: scale/bias pointers are passed as 0).
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    const size_t num_cols = bottom_blob.w * bottom_blob.h;
    const size_t depth = bottom_blob.c;
    const size_t num_rows = top_blob.c;

    // Repack the input feature map into the GEMM "B" operand layout.
    ncnn::Mat bottom_tm(depth * num_cols, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t* src = bottom_blob;
        int8_t* dst = bottom_tm;
        reorder_b(src, dst, depth, num_cols, bottom_blob.cstep);
    }

    // GEMM
    int32_t* out_ptr = top_blob;
    const int8_t* a_ptr = kernel;
    const int8_t* b_ptr = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)out_ptr, a_ptr, b_ptr, num_rows, depth, num_cols, ldc, 0, 0, opt);
}
// 1x1 stride-1 int8 convolution with fused requantization to int8 output.
//
// @param bottom_blob    int8 input feature map (w x h x inch)
// @param top_blob       int8 output feature map (w x h x outch)
// @param kernel         kernel pre-packed by conv1x1s1_sgemm_transform_kernel_int8_neon
// @param _bias          per-output-channel float bias; may be empty
// @param scales_requant per-channel pairs [scale_in, scale_out]; must hold at
//                       least 2 * outch entries (assumed — confirm at callers)
// @param opt            threading/allocator options
//
// Taken by const reference (was by value) to avoid copying the scale vector on
// every call; callers are unaffected.
static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const std::vector<float>& scales_requant, const Option& opt)
{
    const size_t n = bottom_blob.w * bottom_blob.h; // spatial size
    const size_t k = bottom_blob.c;                 // input channels
    const size_t m = top_blob.c;                    // output channels

    ncnn::Mat scales_tm(m);
    ncnn::Mat bias_tm(m);
    float* scales = scales_tm;
    const float* bias = _bias;

    // outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
    // the equation could convert to:
    // out = float2int8( (float)sum * (scale_requant_in * scale_requant_out) + (bias * scale_requant_out) )
    // prebuild the list of (scales_requant_in*scale_requant_out)
    for (size_t i = 0; i < m; ++i)
    {
        scales_tm[i] = scales_requant[2 * i] * scales_requant[2 * i + 1];
    }
    // ... and likewise fold scale_requant_out into the bias
    if (!_bias.empty())
    {
        for (size_t i = 0; i < m; ++i)
        {
            bias_tm[i] = bias[i] * scales_requant[2 * i + 1];
        }
        bias = bias_tm;
    }

    // Repack the input feature map into the GEMM "B" operand layout.
    ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t* pData = bottom_blob;
        int8_t* pReorder = bottom_tm;
        reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
    }

    // GEMM with fused scale/bias requantization (non-zero scales pointer
    // selects the int8-output path in int8kernel).
    int8_t* pc = top_blob;
    const int8_t* pa = kernel;
    const int8_t* pb = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)pc, pa, pb, m, k, n, ldc, scales, (float*)bias, opt);
}
#else
// Repacks the raw 1x1 int8 kernel for the packed-sgemm path: output channels
// are grouped 4 at a time, and within a group the weights of 2 consecutive
// input channels are interleaved per output channel (k0[0] k0[1] k1[0] k1[1] ...).
// Leftover output channels (outch % 4) are stored one channel per plane with
// the same 2-input-channel interleave.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // kernel memory packed 4 x 4
    // planes: outch/4 group planes + outch%4 single-channel planes
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    // groups of 4 output channels
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        const signed char* k0 = kernel + (p + 0) * inch;
        const signed char* k1 = kernel + (p + 1) * inch;
        const signed char* k2 = kernel + (p + 2) * inch;
        const signed char* k3 = kernel + (p + 3) * inch;

        signed char* ktmp = kernel_tm.channel(p / 4);

        int q = 0;
        // 2 input channels per step, interleaved across the 4 output channels
        for (; q + 1 < inch; q += 2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp[2] = k1[0];
            ktmp[3] = k1[1];
            ktmp[4] = k2[0];
            ktmp[5] = k2[1];
            ktmp[6] = k3[0];
            ktmp[7] = k3[1];

            ktmp += 8;
            k0 += 2;
            k1 += 2;
            k2 += 2;
            k3 += 2;
        }
        // odd trailing input channel
        for (; q < inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];

            ktmp += 4;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // leftover output channels, one per plane
    for (int p = remain_outch_start; p < outch; p++)
    {
        const signed char* k0 = kernel + (p + 0) * inch;

        // plane index continues after the nn_outch group planes
        signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

        int q = 0;
        for (; q + 1 < inch; q = q + 2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp += 2;
            k0 += 2;
        }
        for (; q < inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
// 1x1 stride-1 int8 convolution via packed sgemm, int32 output.
// The input is repacked 4 pixels at a time with 2 input channels interleaved,
// matching the kernel layout produced by conv1x1s1_sgemm_transform_kernel_int8_neon,
// then multiplied 4 output channels x 4 pixels per iteration (AArch64 inline
// asm on NEON builds, scalar fallback otherwise).
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    // bottom_tm memory packed 4 x 4
    ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = size >> 2;
        int remain_size_start = nn_size << 2;

        // pack 4 consecutive pixels, interleaving 2 input channels per step
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            // NOTE(review): img1 points at channel 1; it is only dereferenced
            // inside the q + 1 < inch loop, i.e. when at least 2 channels exist.
            const signed char* img0 = bottom_blob.channel(0);
            const signed char* img1 = bottom_blob.channel(1);
            img0 += i;
            img1 += i;

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img0[1];
                tmpptr[3] = img1[1];
                tmpptr[4] = img0[2];
                tmpptr[5] = img1[2];
                tmpptr[6] = img0[3];
                tmpptr[7] = img1[3];

                tmpptr += 8;
                // both pointers advance by two channel planes per iteration
                img0 += bottom_blob.cstep;
                img0 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
            }
            // odd trailing input channel
            for (; q < inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        // leftover pixels (size % 4), packed one pixel per plane
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += bottom_blob.cstep;
            }
        }
    }

    // sgemm process
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    // main path: 4 output channels x 4 pixels per iteration
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        int* outptr0 = top_blob.channel(p);
        int* outptr1 = top_blob.channel(p + 1);
        int* outptr2 = top_blob.channel(p + 2);
        int* outptr3 = top_blob.channel(p + 3);

        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            signed char* tmpptr = bottom_tm.channel(i / 4);
            const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
            // 4x4 int8 micro-kernel: accumulates in v16-v19 (rotated pixel
            // orders), un-rotates into v20-v23, then stores one 4-pixel row
            // per output channel.
            asm volatile(
                "prfm pldl1keep, [%4, #128] \n"
                "prfm pldl1keep, [%5, #128] \n"
                "eor v16.16b, v16.16b, v16.16b \n" // sum0
                "eor v17.16b, v17.16b, v17.16b \n" // sum1
                "eor v18.16b, v18.16b, v18.16b \n" // sum2
                "eor v19.16b, v19.16b, v19.16b \n" // sum3
                "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2
                "cmp w4, #0 \n"
                "beq 1f \n"
                "0: \n" // for (; k+3<L; k=k+4)
                "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3
                "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3
                "add %4, %4, #16 \n"
                "add %5, %5, #16 \n"
                "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2
                "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1
                "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0
                "smull v8.8h, v4.8b, v0.8b \n"
                "smull v9.8h, v4.8b, v1.8b \n"
                "smull v10.8h, v4.8b, v2.8b \n"
                "smull v11.8h, v4.8b, v3.8b \n"
                "prfm pldl1keep, [%4, #1024] \n"
                "prfm pldl1keep, [%5, #1024] \n"
                "smlal2 v8.8h, v4.16b, v0.16b \n"
                "smlal2 v9.8h, v4.16b, v1.16b \n"
                "smlal2 v10.8h, v4.16b, v2.16b \n"
                "smlal2 v11.8h, v4.16b, v3.16b \n"
                "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3
                "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3
                "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3
                "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3
                "subs w4, w4, #1 \n"
                "bne 0b \n"
                "1: \n" // for (; k+1<L; k=k+2)
                // remain loop
                "and w4, %w12, #3 \n" // w4 = remain = K & 3;
                "cmp w4, #0 \n"
                "beq 3f \n"
                "lsr w4, w4, #1 \n" // r4 = nn = L >> 1
                "cmp w4, #0 \n"
                "beq 3f \n"
                "2: \n" // for (; k+1<L; k=k+2)
                "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3
                "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3
                "add %4, %4, #8 \n"
                "add %5, %5, #8 \n"
                "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1
                "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2
                "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3
                "smull v8.8h, v4.8b, v0.8b \n"
                "smull v9.8h, v4.8b, v1.8b \n"
                "smull v10.8h, v4.8b, v2.8b \n"
                "smull v11.8h, v4.8b, v3.8b \n"
                "sadalp v16.4s, v8.8h \n"
                "sadalp v17.4s, v9.8h \n"
                "sadalp v18.4s,v10.8h \n"
                "sadalp v19.4s,v11.8h \n"
                "subs w4, w4, #1 \n"
                "bne 2b \n"
                "3: \n" // realloc
                "mov v20.s[0], v16.s[0] \n"
                "mov v20.s[1], v17.s[0] \n"
                "mov v20.s[2], v18.s[0] \n"
                "mov v20.s[3], v19.s[0] \n"
                "mov v21.s[0], v17.s[1] \n"
                "mov v21.s[1], v16.s[1] \n"
                "mov v21.s[2], v19.s[1] \n"
                "mov v21.s[3], v18.s[1] \n"
                "mov v22.s[0], v18.s[2] \n"
                "mov v22.s[1], v19.s[2] \n"
                "mov v22.s[2], v16.s[2] \n"
                "mov v22.s[3], v17.s[2] \n"
                "mov v23.s[0], v19.s[3] \n"
                "mov v23.s[1], v18.s[3] \n"
                "mov v23.s[2], v17.s[3] \n"
                "mov v23.s[3], v16.s[3] \n"
                "and w4, %w12, #1 \n" // w4 = remain = K & 1;
                "cmp w4, #0 \n"
                "beq 5f \n"
                "4: \n"
                "ld1 {v0.8b}, [%4] \n"
                "ld1 {v1.8b}, [%5] \n"
                "add %4, %4, #4 \n"
                "add %5, %5, #4 \n"
                "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0]
                "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0]
                "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0
                "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1
                "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2
                "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3
                "subs w4, w4, #1 \n"
                // NOTE(review): this branch targets label 2, but w4 == 1 on
                // entry to label 4 so subs reaches 0 and the branch is never
                // taken; "bne 4b" was presumably intended — confirm.
                "bne 2b \n"
                "5: \n"
                "st1 {v20.4s}, [%0] \n"
                "st1 {v21.4s}, [%1] \n"
                "st1 {v22.4s}, [%2] \n"
                "st1 {v23.4s}, [%3] \n"
                : "=r"(outptr0), // %0
                "=r"(outptr1), // %1
                "=r"(outptr2), // %2
                "=r"(outptr3), // %3
                "=r"(tmpptr), // %4
                "=r"(kptr) // %5
                : "0"(outptr0),
                "1"(outptr1),
                "2"(outptr2),
                "3"(outptr3),
                "4"(tmpptr),
                "5"(kptr),
                "r"(inch) // %12
                : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
            // scalar reference: 4 output channels (rows) x 4 pixels (cols)
            int sum0_0 = 0;
            int sum0_1 = 0;
            int sum0_2 = 0;
            int sum0_3 = 0;

            int sum1_0 = 0;
            int sum1_1 = 0;
            int sum1_2 = 0;
            int sum1_3 = 0;

            int sum2_0 = 0;
            int sum2_1 = 0;
            int sum2_2 = 0;
            int sum2_3 = 0;

            int sum3_0 = 0;
            int sum3_1 = 0;
            int sum3_2 = 0;
            int sum3_3 = 0;

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_0 += tmpptr[1] * kptr[1];
                sum0_1 += tmpptr[2] * kptr[0];
                sum0_1 += tmpptr[3] * kptr[1];
                sum0_2 += tmpptr[4] * kptr[0];
                sum0_2 += tmpptr[5] * kptr[1];
                sum0_3 += tmpptr[6] * kptr[0];
                sum0_3 += tmpptr[7] * kptr[1];

                sum1_0 += tmpptr[0] * kptr[2];
                sum1_0 += tmpptr[1] * kptr[3];
                sum1_1 += tmpptr[2] * kptr[2];
                sum1_1 += tmpptr[3] * kptr[3];
                sum1_2 += tmpptr[4] * kptr[2];
                sum1_2 += tmpptr[5] * kptr[3];
                sum1_3 += tmpptr[6] * kptr[2];
                sum1_3 += tmpptr[7] * kptr[3];

                sum2_0 += tmpptr[0] * kptr[4];
                sum2_0 += tmpptr[1] * kptr[5];
                sum2_1 += tmpptr[2] * kptr[4];
                sum2_1 += tmpptr[3] * kptr[5];
                sum2_2 += tmpptr[4] * kptr[4];
                sum2_2 += tmpptr[5] * kptr[5];
                sum2_3 += tmpptr[6] * kptr[4];
                sum2_3 += tmpptr[7] * kptr[5];

                sum3_0 += tmpptr[0] * kptr[6];
                sum3_0 += tmpptr[1] * kptr[7];
                sum3_1 += tmpptr[2] * kptr[6];
                sum3_1 += tmpptr[3] * kptr[7];
                sum3_2 += tmpptr[4] * kptr[6];
                sum3_2 += tmpptr[5] * kptr[7];
                sum3_3 += tmpptr[6] * kptr[6];
                sum3_3 += tmpptr[7] * kptr[7];

                tmpptr += 8;
                kptr += 8;
            }

            for (; q < inch; q++)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_1 += tmpptr[1] * kptr[0];
                sum0_2 += tmpptr[2] * kptr[0];
                sum0_3 += tmpptr[3] * kptr[0];

                sum1_0 += tmpptr[0] * kptr[1];
                sum1_1 += tmpptr[1] * kptr[1];
                sum1_2 += tmpptr[2] * kptr[1];
                sum1_3 += tmpptr[3] * kptr[1];

                sum2_0 += tmpptr[0] * kptr[2];
                sum2_1 += tmpptr[1] * kptr[2];
                sum2_2 += tmpptr[2] * kptr[2];
                sum2_3 += tmpptr[3] * kptr[2];

                sum3_0 += tmpptr[0] * kptr[3];
                sum3_1 += tmpptr[1] * kptr[3];
                sum3_2 += tmpptr[2] * kptr[3];
                sum3_3 += tmpptr[3] * kptr[3];

                tmpptr += 4;
                kptr += 4;
            }

            outptr0[0] = sum0_0;
            outptr0[1] = sum0_1;
            outptr0[2] = sum0_2;
            outptr0[3] = sum0_3;

            outptr1[0] = sum1_0;
            outptr1[1] = sum1_1;
            outptr1[2] = sum1_2;
            outptr1[3] = sum1_3;

            outptr2[0] = sum2_0;
            outptr2[1] = sum2_1;
            outptr2[2] = sum2_2;
            outptr2[3] = sum2_3;

            outptr3[0] = sum3_0;
            outptr3[1] = sum3_1;
            outptr3[2] = sum3_2;
            outptr3[3] = sum3_3;
#endif
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }

        // leftover pixels: 4 output channels x 1 pixel
        for (; i < size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
            const signed char* kptr = kernel.channel(p / 4);
#if 0 //__ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q=0;
            for (; q+3<inch; q=q+4)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8x2_t _k = vld2_s8(kptr);  // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]

                int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3]

                int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
                int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0);  // i0[0]*k[0-3][0]
                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1);  // i0[1]*k[0-3][1]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]

                tmpptr += 4;
                kptr += 16;
            }

            for (; q+1<inch; q=q+2)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr);    // k0[0-1], k1[0-1], k2[0-1], k3[0-1]

                _r0[2] = _r0[0];
                _r0[3] = _r0[1];
                _r0[4] = _r0[0];
                _r0[5] = _r0[1];
                _r0[6] = _r0[0];
                _r0[7] = _r0[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 2;
                kptr += 8;
            }

            for (; q<inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr);    // k[0-3][0]

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vaddw_s16(_sum, vget_low_s16(_tp0));

                tmpptr += 1;
                kptr += 4;
            }

            vst1q_lane_s32(outptr0, _sum, 0);
            vst1q_lane_s32(outptr1, _sum, 1);
            vst1q_lane_s32(outptr2, _sum, 2);
            vst1q_lane_s32(outptr3, _sum, 3);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];

                sum1 += tmpptr[0] * kptr[2];
                sum1 += tmpptr[1] * kptr[3];

                sum2 += tmpptr[0] * kptr[4];
                sum2 += tmpptr[1] * kptr[5];

                sum3 += tmpptr[0] * kptr[6];
                sum3 += tmpptr[1] * kptr[7];

                tmpptr += 2;
                kptr += 8;
            }

            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];

                tmpptr += 1;
                kptr += 4;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;
#endif
            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }

    // leftover output channels: 1 channel x 4 pixels, then 1 x 1
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        int* outptr0 = out0;

        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            signed char* tmpptr = bottom_tm.channel(i / 4);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if __ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                // NOTE(review): vld1_s8 loads 8 bytes even when fewer remain
                // in the packed kernel plane — relies on Mat allocation
                // padding past the last weight; confirm.
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
                int8x8_t _k = vld1_s8(kptr);    // k0[0-1]

                _k[2] = _k[0];
                _k[3] = _k[1];
                _k[4] = _k[0];
                _k[5] = _k[1];
                _k[6] = _k[0];
                _k[7] = _k[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 8;
                kptr += 2;
            }

            for (; q < inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
                int8x8_t _k = vld1_s8(kptr);    // k[0][0]

                int16x8_t _r0_s16 = vmovl_s8(_r0);
                int16x8_t _k_s16 = vmovl_s8(_k);

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0

                tmpptr += 4;
                kptr += 1;
            }

            vst1q_s32(outptr0, _sum);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];

                sum1 += tmpptr[2] * kptr[0];
                sum1 += tmpptr[3] * kptr[1];

                sum2 += tmpptr[4] * kptr[0];
                sum2 += tmpptr[5] * kptr[1];

                sum3 += tmpptr[6] * kptr[0];
                sum3 += tmpptr[7] * kptr[1];

                tmpptr += 8;
                kptr += 2;
            }

            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];

                tmpptr += 4;
                kptr++;
            }

            outptr0[0] = sum0;
            outptr0[1] = sum1;
            outptr0[2] = sum2;
            outptr0[3] = sum3;
#endif
            outptr0 += 4;
        }

        for (; i < size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);

            int q = 0;
            int sum0 = 0;

            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];

                tmpptr++;
                kptr++;
            }

            outptr0[0] = sum0;

            outptr0++;
        }
    }
}
static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// bottom_tm memory packed 4 x 4
ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const signed char* img0 = bottom_blob.channel(0);
const signed char* img1 = bottom_blob.channel(1);
img0 += i;
img1 += i;
signed char* tmpptr = bottom_tm.channel(i / 4);
int q = 0;
for (; q + 1 < inch; q = q + 2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img0[1];
tmpptr[3] = img1[1];
tmpptr[4] = img0[2];
tmpptr[5] = img1[2];
tmpptr[6] = img0[3];
tmpptr[7] = img1[3];
tmpptr += 8;
img0 += bottom_blob.cstep;
img0 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
}
for (; q < inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
signed char* outptr0 = top_blob.channel(p);
signed char* outptr1 = top_blob.channel(p + 1);
signed char* outptr2 = top_blob.channel(p + 2);
signed char* outptr3 = top_blob.channel(p + 3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
const float bias2 = bias ? bias[p + 2] : 0.f;
const float bias3 = bias ? bias[p + 3] : 0.f;
const float scale_requant_in0 = scales_requant[2 * p];
const float scale_requant_out0 = scales_requant[2 * p + 1];
const float scale_requant_in1 = scales_requant[2 * (p + 1)];
const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1];
const float scale_requant_in2 = scales_requant[2 * (p + 2)];
const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1];
const float scale_requant_in3 = scales_requant[2 * (p + 3)];
const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1];
float32x4_t _bias03, _scale_in03, _scale_out03;
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _bias1 = vdupq_n_f32(bias1);
float32x4_t _bias2 = vdupq_n_f32(bias2);
float32x4_t _bias3 = vdupq_n_f32(bias3);
_bias03[0] = bias0;
_bias03[1] = bias1;
_bias03[2] = bias2;
_bias03[3] = bias3;
_scale_in03[0] = scale_requant_in0;
_scale_in03[1] = scale_requant_in1;
_scale_in03[2] = scale_requant_in2;
_scale_in03[3] = scale_requant_in3;
_scale_out03[0] = scale_requant_out0;
_scale_out03[1] = scale_requant_out1;
_scale_out03[2] = scale_requant_out2;
_scale_out03[3] = scale_requant_out3;
int i = 0;
for (; i + 3 < size; i += 4)
{
signed char* tmpptr = bottom_tm.channel(i / 4);
const signed char* kptr = kernel.channel(p / 4);
#if 1 //__ARM_NEON
asm volatile(
"prfm pldl1keep, [%4, #128] \n"
"prfm pldl1keep, [%5, #128] \n"
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"lsr w4, %w12, #2 \n" // r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n" // for (; k+3<L; k=k+4)
"ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3
"ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3
"add %4, %4, #16 \n"
"add %5, %5, #16 \n"
"rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2
"rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1
"rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"prfm pldl1keep, [%4, #1024] \n"
"prfm pldl1keep, [%5, #1024] \n"
"smlal2 v8.8h, v4.16b, v0.16b \n"
"smlal2 v9.8h, v4.16b, v1.16b \n"
"smlal2 v10.8h, v4.16b, v2.16b \n"
"smlal2 v11.8h, v4.16b, v3.16b \n"
"sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3
"sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3
"sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3
"sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n" // for (; k+1<L; k=k+2)
// remain loop
"and w4, %w12, #3 \n" // w4 = remain = K & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"lsr w4, w4, #1 \n" // r4 = nn = L >> 1
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n" // for (; k+1<L; k=k+2)
"ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3
"ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1
"rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2
"rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"sadalp v16.4s, v8.8h \n"
"sadalp v17.4s, v9.8h \n"
"sadalp v18.4s,v10.8h \n"
"sadalp v19.4s,v11.8h \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n" // realloc
"mov v20.s[0], v16.s[0] \n"
"mov v20.s[1], v17.s[0] \n"
"mov v20.s[2], v18.s[0] \n"
"mov v20.s[3], v19.s[0] \n"
"mov v21.s[0], v17.s[1] \n"
"mov v21.s[1], v16.s[1] \n"
"mov v21.s[2], v19.s[1] \n"
"mov v21.s[3], v18.s[1] \n"
"mov v22.s[0], v18.s[2] \n"
"mov v22.s[1], v19.s[2] \n"
"mov v22.s[2], v16.s[2] \n"
"mov v22.s[3], v17.s[2] \n"
"mov v23.s[0], v19.s[3] \n"
"mov v23.s[1], v18.s[3] \n"
"mov v23.s[2], v17.s[3] \n"
"mov v23.s[3], v16.s[3] \n"
"and w4, %w12, #1 \n" // w4 = remain = K & 1;
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v0.8b}, [%4] \n"
"ld1 {v1.8b}, [%5] \n"
"add %4, %4, #4 \n"
"add %5, %5, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0]
"sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0]
"smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0
"smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1
"smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2
"smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3
"subs w4, w4, #1 \n"
"bne 2b \n"
"5: \n"
// top_s32 -> top_f32
"scvtf v20.4s, v20.4s \n"
"scvtf v21.4s, v21.4s \n"
"scvtf v22.4s, v22.4s \n"
"scvtf v23.4s, v23.4s \n"
// top_f32 = top_f32 * scale_in
"fmul v20.4s, v20.4s, %17.s[0] \n"
"fmul v21.4s, v21.4s, %17.s[1] \n"
"fmul v22.4s, v22.4s, %17.s[2] \n"
"fmul v23.4s, v23.4s, %17.s[3] \n"
// top_f32 = top_f32 + bias
"fadd v20.4s, v20.4s, %13.4s \n"
"fadd v21.4s, v21.4s, %14.4s \n"
"fadd v22.4s, v22.4s, %15.4s \n"
"fadd v23.4s, v23.4s, %16.4s \n"
// top_f32 = top_f32 * scale_out
"fmul v20.4s, v20.4s, %18.s[0] \n"
"fmul v21.4s, v21.4s, %18.s[1] \n"
"fmul v22.4s, v22.4s, %18.s[2] \n"
"fmul v23.4s, v23.4s, %18.s[3] \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s \n"
"fcvtas v21.4s, v21.4s \n"
"fcvtas v22.4s, v22.4s \n"
"fcvtas v23.4s, v23.4s \n"
// top_s32 -> top_s16
"sqxtn v7.4h, v20.4s \n"
"sqxtn2 v7.8h, v21.4s \n"
"sqxtn v8.4h, v22.4s \n"
"sqxtn2 v8.8h, v23.4s \n"
// top_s16 -> top_s8
"sqxtn v0.8b, v7.8h \n"
"sqxtn v1.8b, v8.8h \n"
// save top_s8
"st1 {v0.s}[0], [%0] \n"
"st1 {v0.s}[1], [%1] \n"
"st1 {v1.s}[0], [%2] \n"
"st1 {v1.s}[1], [%3] \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"w"(_bias0), // %13
"w"(_bias1), // %14
"w"(_bias2), // %15
"w"(_bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int q = 0;
for (; q + 1 < inch; q = q + 2)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_0 += tmpptr[1] * kptr[1];
sum0_1 += tmpptr[2] * kptr[0];
sum0_1 += tmpptr[3] * kptr[1];
sum0_2 += tmpptr[4] * kptr[0];
sum0_2 += tmpptr[5] * kptr[1];
sum0_3 += tmpptr[6] * kptr[0];
sum0_3 += tmpptr[7] * kptr[1];
sum1_0 += tmpptr[0] * kptr[2];
sum1_0 += tmpptr[1] * kptr[3];
sum1_1 += tmpptr[2] * kptr[2];
sum1_1 += tmpptr[3] * kptr[3];
sum1_2 += tmpptr[4] * kptr[2];
sum1_2 += tmpptr[5] * kptr[3];
sum1_3 += tmpptr[6] * kptr[2];
sum1_3 += tmpptr[7] * kptr[3];
sum2_0 += tmpptr[0] * kptr[4];
sum2_0 += tmpptr[1] * kptr[5];
sum2_1 += tmpptr[2] * kptr[4];
sum2_1 += tmpptr[3] * kptr[5];
sum2_2 += tmpptr[4] * kptr[4];
sum2_2 += tmpptr[5] * kptr[5];
sum2_3 += tmpptr[6] * kptr[4];
sum2_3 += tmpptr[7] * kptr[5];
sum3_0 += tmpptr[0] * kptr[6];
sum3_0 += tmpptr[1] * kptr[7];
sum3_1 += tmpptr[2] * kptr[6];
sum3_1 += tmpptr[3] * kptr[7];
sum3_2 += tmpptr[4] * kptr[6];
sum3_2 += tmpptr[5] * kptr[7];
sum3_3 += tmpptr[6] * kptr[6];
sum3_3 += tmpptr[7] * kptr[7];
tmpptr += 8;
kptr += 8;
}
for (; q < inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
for (; i < size; i++)
{
signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
const signed char* kptr = kernel.channel(p / 4);
#if 1 //__ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]
int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3]
int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]
tmpptr += 4;
kptr += 16;
}
for (; q + 1 < inch; q = q + 2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1]
_r0[2] = _r0[0];
_r0[3] = _r0[1];
_r0[4] = _r0[0];
_r0[5] = _r0[1];
_r0[6] = _r0[0];
_r0[7] = _r0[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 2;
kptr += 8;
}
for (; q < inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k[0-3][0]
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vaddw_s16(_sum, vget_low_s16(_tp0));
tmpptr += 1;
kptr += 4;
}
// top_s32 -> top_f32
float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
// top_f32 = top_f32 * scale_in
_sum_f32 = vmulq_f32(_sum_f32, _scale_in03);
// top_f32 = top_f32 + bias
_sum_f32 = vaddq_f32(_sum_f32, _bias03);
// top_f32 = top_f32 * scale_out
_sum_f32 = vmulq_f32(_sum_f32, _scale_out03);
// top_f32 -> top_s32
_sum = vcvtaq_s32_f32(_sum_f32);
// top_s32 -> top_s16
int16x4_t _sum_s16 = vqmovn_s32(_sum);
int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
// top_s16 -> top_s8
int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
// save top_s8
vst1_lane_s8(outptr0, _sum_s8, 0);
vst1_lane_s8(outptr1, _sum_s8, 1);
vst1_lane_s8(outptr2, _sum_s8, 2);
vst1_lane_s8(outptr3, _sum_s8, 3);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q = 0;
for (; q + 1 < inch; q = q + 2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[0] * kptr[2];
sum1 += tmpptr[1] * kptr[3];
sum2 += tmpptr[0] * kptr[4];
sum2 += tmpptr[1] * kptr[5];
sum3 += tmpptr[0] * kptr[6];
sum3 += tmpptr[1] * kptr[7];
tmpptr += 2;
kptr += 8;
}
for (; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr += 1;
kptr += 4;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
signed char* outptr0 = out0;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2 * p];
const float scale_requant_out = scales_requant[2 * p + 1];
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);
int i = 0;
for (; i + 3 < size; i += 4)
{
signed char* tmpptr = bottom_tm.channel(i / 4);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if 1 //__ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q = 0;
for (; q + 1 < inch; q = q + 2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
int8x8_t _k = vld1_s8(kptr); // k0[0-1]
_k[2] = _k[0];
_k[3] = _k[1];
_k[4] = _k[0];
_k[5] = _k[1];
_k[6] = _k[0];
_k[7] = _k[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 8;
kptr += 2;
}
for (; q < inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
int8x8_t _k = vld1_s8(kptr); // k[0][0]
int16x8_t _r0_s16 = vmovl_s8(_r0);
int16x8_t _k_s16 = vmovl_s8(_k);
_sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0
tmpptr += 4;
kptr += 1;
}
// top_s32 -> top_f32
float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
// top_f32 = top_f32 * scale_in
_sum_f32 = vmulq_f32(_sum_f32, _scale_in);
// top_f32 = top_f32 + bias
_sum_f32 = vaddq_f32(_sum_f32, _bias0);
// top_f32 = top_f32 * scale_out
_sum_f32 = vmulq_f32(_sum_f32, _scale_out);
// top_f32 -> top_s32
_sum = vcvtaq_s32_f32(_sum_f32);
// top_s32 -> top_s16
int16x4_t _sum_s16 = vqmovn_s32(_sum);
int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
// top_s16 -> top_s8
int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
// save top_s8
vst1_s8(outptr0, _sum_s8);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q = 0;
for (; q + 1 < inch; q = q + 2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[2] * kptr[0];
sum1 += tmpptr[3] * kptr[1];
sum2 += tmpptr[4] * kptr[0];
sum2 += tmpptr[5] * kptr[1];
sum3 += tmpptr[6] * kptr[0];
sum3 += tmpptr[7] * kptr[1];
tmpptr += 8;
kptr += 2;
}
for (; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
#endif
outptr0 += 4;
}
for (; i < size; i++)
{
signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
int q = 0;
int sum0 = 0;
for (; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
outptr0++;
}
}
}
#endif
#else
// Repack the 1x1 int8 convolution kernel for the sgemm path.
//
// Output channels are processed four at a time: for each input channel q the
// four weights k0[q], k1[q], k2[q], k3[q] are stored adjacently, so the sgemm
// inner loop can load one 4-byte group per input channel. Leftover output
// channels (outch % 4) are stored as plain contiguous rows, one per channel.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // one channel per group of 4 output channels, plus one per leftover channel
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);

    int p = 0;
    for (; p + 3 < outch; p += 4)
    {
        signed char* ktmp = kernel_tm.channel(p / 4);

        const signed char* k0 = kernel + (p + 0) * inch;
        const signed char* k1 = kernel + (p + 1) * inch;
        const signed char* k2 = kernel + (p + 2) * inch;
        const signed char* k3 = kernel + (p + 3) * inch;

        for (int q = 0; q < inch; q++)
        {
            // interleave one weight from each of the four output channels
            ktmp[0] = k0[q];
            ktmp[1] = k1[q];
            ktmp[2] = k2[q];
            ktmp[3] = k3[q];
            ktmp += 4;
        }
    }
    for (; p < outch; p++)
    {
        signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);
        const signed char* k0 = kernel + p * inch;

        for (int q = 0; q < inch; q++)
        {
            *ktmp++ = k0[q];
        }
    }
}
/*
* Convolution 1x1 quantized with sgemm int8
*/
// Int8 1x1 stride-1 convolution computed as a GEMM.
//
// Stage 1 (interleave): the bottom blob's pixels are repacked into `tmp` in
// tiles of 8, then 4, then 1 pixel(s), each tile holding all `inch` input
// channels contiguously, so the GEMM inner loop reads sequentially.
// Stage 2 (sgemm): output channels are processed 4 at a time (then singly),
// pixels 8/4/1 at a time. The fast path is ARMv7 NEON inline assembly
// (widen s8 -> s16 with vmovl, accumulate with vmlal.s16 into s32); the
// `#else` branch is the equivalent scalar C reference. Results are stored
// as raw int32 into top_blob (no requantization here).
//
// `kernel` must be in the layout produced by
// conv1x1s1_sgemm_transform_kernel_int8_neon (4 output channels interleaved
// per group, leftovers as flat rows).
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    // interleave
    // Channel layout of tmp: one channel per 8-pixel tile, then one per
    // 4-pixel tile, then one per remaining single pixel.
    Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = nn_size << 3;

        // 8-pixel tiles: copy 8 consecutive pixels of every input channel
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 8;

            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
#if __ARM_NEON
                // 8-byte bulk copy of one channel's pixel octet
                asm volatile(
                    "pld [%0, #64] \n"
                    "vld1.s8 {d0}, [%0] \n"
                    "vst1.s8 {d0}, [%1]! \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "d0");
                img0 += bottom_blob.cstep;
#else
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
                tmpptr[4] = img0[4];
                tmpptr[5] = img0[5];
                tmpptr[6] = img0[6];
                tmpptr[7] = img0[7];

                tmpptr += 8;
                img0 += bottom_blob.cstep;
#endif // __ARM_NEON
            }
        }

        // 4-pixel tiles for the remainder after the 8-wide pass
        nn_size = (size - remain_size_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        remain_size_start += nn_size << 2;

        // leftover single pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr++;
                img0 += bottom_blob.cstep;
            }
        }
    }

    // sgemm process
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = (outch - remain_outch_start) >> 2;

    // four output channels at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        int* outptr0 = top_blob.channel(p);
        int* outptr1 = top_blob.channel(p + 1);
        int* outptr2 = top_blob.channel(p + 2);
        int* outptr3 = top_blob.channel(p + 3);

        int i = 0;
        // 8 pixels x 4 output channels per iteration
        for (; i + 7 < size; i += 8)
        {
            const signed char* tmpptr = tmp.channel(i / 8);
            const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
            // q6..q13 hold the 4x8 int32 accumulators; inch is consumed
            // 4 channels per main-loop iteration, then 1 at a time.
            asm volatile(
                // inch loop
                "vmov.s32 q6, #0 \n"
                "vmov.s32 q7, #0 \n"
                "vmov.s32 q8, #0 \n"
                "vmov.s32 q9, #0 \n"
                "vmov.s32 q10, #0 \n"
                "vmov.s32 q11, #0 \n"
                "vmov.s32 q12, #0 \n"
                "vmov.s32 q13, #0 \n"
                "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
                "cmp r4, #0 \n"
                "beq 1f \n"
                "0: \n" // for(; nn != 0; nn--)
                "pld [%4, #128] \n"
                "vld1.s8 {d4-d7}, [%4]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
                "vmovl.s8 q5, d7 \n" // a30-a37
                "vmovl.s8 q4, d6 \n" // a20-a27
                "vmovl.s8 q3, d5 \n" // a10-a17
                "vmovl.s8 q2, d4 \n" // a00-a07
                "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
                "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
                "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
                "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00-a07) * k00
                "vmlal.s16 q7, d5, d0[0] \n"
                "vmlal.s16 q8, d4, d0[1] \n" // sum1 = (a00-a07) * k10
                "vmlal.s16 q9, d5, d0[1] \n"
                "vmlal.s16 q10, d4, d0[2] \n" // sum2 = (a00-a07) * k20
                "vmlal.s16 q11, d5, d0[2] \n"
                "vmlal.s16 q12, d4, d0[3] \n" // sum3 = (a00-a07) * k30
                "vmlal.s16 q13, d5, d0[3] \n"
                "vmlal.s16 q6, d6, d1[0] \n" // sum0 += (a10-a17) * k01
                "vmlal.s16 q7, d7, d1[0] \n"
                "vmlal.s16 q8, d6, d1[1] \n" // sum1 += (a10-a17) * k11
                "vmlal.s16 q9, d7, d1[1] \n"
                "vmlal.s16 q10, d6, d1[2] \n" // sum2 += (a10-a17) * k21
                "vmlal.s16 q11, d7, d1[2] \n"
                "vmlal.s16 q12, d6, d1[3] \n" // sum3 += (a10-a17) * k31
                "vmlal.s16 q13, d7, d1[3] \n"
                "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20-a27) * k02
                "vmlal.s16 q7, d9, d2[0] \n"
                "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20-a27) * k12
                "vmlal.s16 q9, d9, d2[1] \n"
                "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20-a27) * k22
                "vmlal.s16 q11, d9, d2[2] \n"
                "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20-a27) * k32
                "vmlal.s16 q13, d9, d2[3] \n"
                "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30-a37) * k03
                "vmlal.s16 q7, d11, d3[0] \n"
                "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30-a37) * k13
                "vmlal.s16 q9, d11, d3[1] \n"
                "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30-a37) * k23
                "vmlal.s16 q11, d11, d3[2] \n"
                "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30-a37) * k33
                "vmlal.s16 q13, d11, d3[3] \n"
                "subs r4, r4, #1 \n"
                "bne 0b \n" // end for
                "1: \n"
                // remain loop
                // NOTE(review): the d-register loads below read 8 bytes but
                // only 8/4 are consumed per step — assumes readable padding
                // past the data; confirm Mat allocation guarantees this.
                "and r4, %12, #3 \n" // r4 = remain = inch & 3
                "cmp r4, #0 \n"
                "beq 3f \n"
                "2: \n" // for(; remain != 0; remain--)
                "vld1.s8 {d2}, [%4]! \n" // tmpr a00-a07 a(inch)(data)
                "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
                "vmovl.s8 q1, d2 \n"
                "vmovl.s8 q0, d0 \n"
                "add %5, #4 \n"
                "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a07) * k00
                "vmlal.s16 q7, d3, d0[0] \n"
                "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00-a07) * k10
                "vmlal.s16 q9, d3, d0[1] \n"
                "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00-a07) * k20
                "vmlal.s16 q11, d3, d0[2] \n"
                "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00-a07) * k30
                "vmlal.s16 q13, d3, d0[3] \n"
                "subs r4, r4, #1 \n"
                "bne 2b \n"
                "3: \n" // store the result to memory
                "vst1.s32 {d12-d15}, [%0]! \n"
                "vst1.s32 {d16-d19}, [%1]! \n"
                "vst1.s32 {d20-d23}, [%2]! \n"
                "vst1.s32 {d24-d27}, [%3]! \n"
                : "=r"(outptr0), // %0
                "=r"(outptr1), // %1
                "=r"(outptr2), // %2
                "=r"(outptr3), // %3
                "=r"(tmpptr), // %4
                "=r"(kptr) // %5
                : "0"(outptr0),
                "1"(outptr1),
                "2"(outptr2),
                "3"(outptr3),
                "4"(tmpptr),
                "5"(kptr),
                "r"(inch) // %12
                : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#else
            // scalar reference: sum<outch lane>_<pixel lane>
            int sum0_0 = 0;
            int sum0_1 = 0;
            int sum0_2 = 0;
            int sum0_3 = 0;
            int sum0_4 = 0;
            int sum0_5 = 0;
            int sum0_6 = 0;
            int sum0_7 = 0;

            int sum1_0 = 0;
            int sum1_1 = 0;
            int sum1_2 = 0;
            int sum1_3 = 0;
            int sum1_4 = 0;
            int sum1_5 = 0;
            int sum1_6 = 0;
            int sum1_7 = 0;

            int sum2_0 = 0;
            int sum2_1 = 0;
            int sum2_2 = 0;
            int sum2_3 = 0;
            int sum2_4 = 0;
            int sum2_5 = 0;
            int sum2_6 = 0;
            int sum2_7 = 0;

            int sum3_0 = 0;
            int sum3_1 = 0;
            int sum3_2 = 0;
            int sum3_3 = 0;
            int sum3_4 = 0;
            int sum3_5 = 0;
            int sum3_6 = 0;
            int sum3_7 = 0;

            for (int q = 0; q < inch; q++)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_1 += tmpptr[1] * kptr[0];
                sum0_2 += tmpptr[2] * kptr[0];
                sum0_3 += tmpptr[3] * kptr[0];
                sum0_4 += tmpptr[4] * kptr[0];
                sum0_5 += tmpptr[5] * kptr[0];
                sum0_6 += tmpptr[6] * kptr[0];
                sum0_7 += tmpptr[7] * kptr[0];

                sum1_0 += tmpptr[0] * kptr[1];
                sum1_1 += tmpptr[1] * kptr[1];
                sum1_2 += tmpptr[2] * kptr[1];
                sum1_3 += tmpptr[3] * kptr[1];
                sum1_4 += tmpptr[4] * kptr[1];
                sum1_5 += tmpptr[5] * kptr[1];
                sum1_6 += tmpptr[6] * kptr[1];
                sum1_7 += tmpptr[7] * kptr[1];

                sum2_0 += tmpptr[0] * kptr[2];
                sum2_1 += tmpptr[1] * kptr[2];
                sum2_2 += tmpptr[2] * kptr[2];
                sum2_3 += tmpptr[3] * kptr[2];
                sum2_4 += tmpptr[4] * kptr[2];
                sum2_5 += tmpptr[5] * kptr[2];
                sum2_6 += tmpptr[6] * kptr[2];
                sum2_7 += tmpptr[7] * kptr[2];

                sum3_0 += tmpptr[0] * kptr[3];
                sum3_1 += tmpptr[1] * kptr[3];
                sum3_2 += tmpptr[2] * kptr[3];
                sum3_3 += tmpptr[3] * kptr[3];
                sum3_4 += tmpptr[4] * kptr[3];
                sum3_5 += tmpptr[5] * kptr[3];
                sum3_6 += tmpptr[6] * kptr[3];
                sum3_7 += tmpptr[7] * kptr[3];

                tmpptr += 8;
                kptr += 4;
            }

            outptr0[0] = sum0_0;
            outptr0[1] = sum0_1;
            outptr0[2] = sum0_2;
            outptr0[3] = sum0_3;
            outptr0[4] = sum0_4;
            outptr0[5] = sum0_5;
            outptr0[6] = sum0_6;
            outptr0[7] = sum0_7;

            outptr1[0] = sum1_0;
            outptr1[1] = sum1_1;
            outptr1[2] = sum1_2;
            outptr1[3] = sum1_3;
            outptr1[4] = sum1_4;
            outptr1[5] = sum1_5;
            outptr1[6] = sum1_6;
            outptr1[7] = sum1_7;

            outptr2[0] = sum2_0;
            outptr2[1] = sum2_1;
            outptr2[2] = sum2_2;
            outptr2[3] = sum2_3;
            outptr2[4] = sum2_4;
            outptr2[5] = sum2_5;
            outptr2[6] = sum2_6;
            outptr2[7] = sum2_7;

            outptr3[0] = sum3_0;
            outptr3[1] = sum3_1;
            outptr3[2] = sum3_2;
            outptr3[3] = sum3_3;
            outptr3[4] = sum3_4;
            outptr3[5] = sum3_5;
            outptr3[6] = sum3_6;
            outptr3[7] = sum3_7;

            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
#endif // __ARM_NEON
        }

        // 4 pixels x 4 output channels per iteration
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
            asm volatile(
                // inch loop
                "vmov.s32 q6, #0 \n"
                "vmov.s32 q7, #0 \n"
                "vmov.s32 q8, #0 \n"
                "vmov.s32 q9, #0 \n"
                "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
                "cmp r4, #0 \n"
                "beq 1f \n"
                "0: \n" // for(; nn != 0; nn--)
                "pld [%4, #128] \n"
                "vld1.s8 {d4-d5}, [%4]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
                "vmovl.s8 q3, d5 \n" // a20-a23,a30-a33
                "vmovl.s8 q2, d4 \n" // a00-a03,a10-a13
                "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
                "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
                "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
                "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00-a03) * k00
                "vmlal.s16 q7, d4, d0[1] \n" // sum1 = (a00-a03) * k10
                "vmlal.s16 q8, d4, d0[2] \n" // sum2 = (a00-a03) * k20
                "vmlal.s16 q9, d4, d0[3] \n" // sum3 = (a00-a03) * k30
                "vmlal.s16 q6, d5, d1[0] \n" // sum0 += (a10-a13) * k01
                "vmlal.s16 q7, d5, d1[1] \n" // sum1 += (a10-a13) * k11
                "vmlal.s16 q8, d5, d1[2] \n" // sum2 += (a10-a13) * k21
                "vmlal.s16 q9, d5, d1[3] \n" // sum3 += (a10-a13) * k31
                "vmlal.s16 q6, d6, d2[0] \n" // sum0 += (a20-a23) * k02
                "vmlal.s16 q7, d6, d2[1] \n" // sum1 += (a20-a23) * k12
                "vmlal.s16 q8, d6, d2[2] \n" // sum2 += (a20-a23) * k22
                "vmlal.s16 q9, d6, d2[3] \n" // sum3 += (a20-a23) * k32
                "vmlal.s16 q6, d7, d3[0] \n" // sum0 += (a30-a33) * k03
                "vmlal.s16 q7, d7, d3[1] \n" // sum1 += (a30-a33) * k13
                "vmlal.s16 q8, d7, d3[2] \n" // sum2 += (a30-a33) * k23
                "vmlal.s16 q9, d7, d3[3] \n" // sum3 += (a30-a33) * k33
                "subs r4, r4, #1 \n"
                "bne 0b \n" // end for
                "1: \n"
                // remain loop
                "and r4, %12, #3 \n" // r4 = remain = inch & 3
                "cmp r4, #0 \n"
                "beq 3f \n"
                "2: \n" // for(; remain != 0; remain--)
                "vld1.s8 {d2}, [%4] \n" // tmpr a00-a03 a(inch)(data)
                "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
                "vmovl.s8 q1, d2 \n"
                "vmovl.s8 q0, d0 \n"
                "add %4, #4 \n"
                "add %5, #4 \n"
                "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a03) * k00
                "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00-a03) * k10
                "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00-a03) * k20
                "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00-a03) * k30
                "subs r4, r4, #1 \n"
                "bne 2b \n"
                "3: \n" // store the result to memory
                "vst1.s32 {d12-d13}, [%0]! \n"
                "vst1.s32 {d14-d15}, [%1]! \n"
                "vst1.s32 {d16-d17}, [%2]! \n"
                "vst1.s32 {d18-d19}, [%3]! \n"
                : "=r"(outptr0), // %0
                "=r"(outptr1), // %1
                "=r"(outptr2), // %2
                "=r"(outptr3), // %3
                "=r"(tmpptr), // %4
                "=r"(kptr) // %5
                : "0"(outptr0),
                "1"(outptr1),
                "2"(outptr2),
                "3"(outptr3),
                "4"(tmpptr),
                "5"(kptr),
                "r"(inch) // %12
                : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#else
            int sum0_0 = 0;
            int sum0_1 = 0;
            int sum0_2 = 0;
            int sum0_3 = 0;

            int sum1_0 = 0;
            int sum1_1 = 0;
            int sum1_2 = 0;
            int sum1_3 = 0;

            int sum2_0 = 0;
            int sum2_1 = 0;
            int sum2_2 = 0;
            int sum2_3 = 0;

            int sum3_0 = 0;
            int sum3_1 = 0;
            int sum3_2 = 0;
            int sum3_3 = 0;

            for (int q = 0; q < inch; q++)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_1 += tmpptr[1] * kptr[0];
                sum0_2 += tmpptr[2] * kptr[0];
                sum0_3 += tmpptr[3] * kptr[0];

                sum1_0 += tmpptr[0] * kptr[1];
                sum1_1 += tmpptr[1] * kptr[1];
                sum1_2 += tmpptr[2] * kptr[1];
                sum1_3 += tmpptr[3] * kptr[1];

                sum2_0 += tmpptr[0] * kptr[2];
                sum2_1 += tmpptr[1] * kptr[2];
                sum2_2 += tmpptr[2] * kptr[2];
                sum2_3 += tmpptr[3] * kptr[2];

                sum3_0 += tmpptr[0] * kptr[3];
                sum3_1 += tmpptr[1] * kptr[3];
                sum3_2 += tmpptr[2] * kptr[3];
                sum3_3 += tmpptr[3] * kptr[3];

                tmpptr += 4;
                kptr += 4;
            }

            outptr0[0] = sum0_0;
            outptr0[1] = sum0_1;
            outptr0[2] = sum0_2;
            outptr0[3] = sum0_3;

            outptr1[0] = sum1_0;
            outptr1[1] = sum1_1;
            outptr1[2] = sum1_2;
            outptr1[3] = sum1_3;

            outptr2[0] = sum2_0;
            outptr2[1] = sum2_1;
            outptr2[2] = sum2_2;
            outptr2[3] = sum2_3;

            outptr3[0] = sum3_0;
            outptr3[1] = sum3_1;
            outptr3[2] = sum3_2;
            outptr3[3] = sum3_3;

            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
#endif // __ARM_NEON
        }

        // 1 pixel x 4 output channels per iteration
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
            // q6..q9 accumulate partial products over the 4-wide inch loop
            // and are reduced into q10 after the loop; q10 also collects the
            // scalar remainder.
            asm volatile(
                // inch loop
                "veor q6, q6, q6 \n"
                "veor q7, q7, q7 \n"
                "veor q8, q8, q8 \n"
                "veor q9, q9, q9 \n"
                "vmov.s32 q10, #0 \n"
                "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
                "cmp r4, #0 \n"
                "beq 1f \n"
                "0: \n" // for(; nn != 0; nn--)
                "pld [%4, #128] \n"
                "vld1.s8 {d4}, [%4] \n" // tmpr a00,a10,a20,a30 a(inch)(data)
                "add %4, #4 \n"
                "vmovl.s8 q2, d4 \n" // a00,a10,a20,a30
                "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
                "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
                "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
                "vmlal.s16 q6, d0, d4[0] \n" // (k00-k30) * a00
                "vmlal.s16 q7, d1, d4[1] \n" // (k01-k31) * a10
                "vmlal.s16 q8, d2, d4[2] \n" // (k02-k32) * a20
                "vmlal.s16 q9, d3, d4[3] \n" // (k03-k33) * a30
                "subs r4, r4, #1 \n"
                "bne 0b \n" // end for
                "vadd.s32 q6, q6, q7 \n"
                "vadd.s32 q9, q9, q8 \n"
                "vadd.s32 q10, q6, q9 \n"
                "1: \n"
                // remain loop
                "and r4, %12, #3 \n" // r4 = remain = inch & 3
                "cmp r4, #0 \n"
                "beq 3f \n"
                "2: \n" // for(; remain != 0; remain--)
                "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch)(data)
                "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
                "vmovl.s8 q1, d2 \n"
                "vmovl.s8 q0, d0 \n"
                "add %4, #1 \n"
                "add %5, #4 \n"
                "vmlal.s16 q10, d0, d2[0] \n"
                "subs r4, r4, #1 \n"
                "bne 2b \n"
                "3: \n" // store the result to memory
                "vst1.s32 {d20[0]}, [%0]! \n"
                "vst1.s32 {d20[1]}, [%1]! \n"
                "vst1.s32 {d21[0]}, [%2]! \n"
                "vst1.s32 {d21[1]}, [%3]! \n"
                : "=r"(outptr0), // %0
                "=r"(outptr1), // %1
                "=r"(outptr2), // %2
                "=r"(outptr3), // %3
                "=r"(tmpptr), // %4
                "=r"(kptr) // %5
                : "0"(outptr0),
                "1"(outptr1),
                "2"(outptr2),
                "3"(outptr3),
                "4"(tmpptr),
                "5"(kptr),
                "r"(inch) // %12
                : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            for (int q = 0; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];

                tmpptr++;
                kptr += 4;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;

            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
#endif // __ARM_NEON
        }
    }

    remain_outch_start += nn_outch << 2;

    // leftover output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        int* outptr0 = out0;

        int i = 0;
        // 8 pixels x 1 output channel
        for (; i + 7 < size; i += 8)
        {
            const signed char* tmpptr = tmp.channel(i / 8);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if __ARM_NEON
            asm volatile(
                // inch loop
                "vmov.s32 q6, #0 \n"
                "vmov.s32 q7, #0 \n"
                "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
                "cmp r4, #0 \n"
                "beq 1f \n"
                "0: \n" // for(; nn != 0; nn--)
                "pld [%1, #128] \n"
                "vld1.s8 {d4-d7}, [%1]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
                "vmovl.s8 q5, d7 \n" // a30-a37
                "vmovl.s8 q4, d6 \n" // a20-a27
                "vmovl.s8 q3, d5 \n" // a10-a17
                "vmovl.s8 q2, d4 \n" // a00-a07
                "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch)
                "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03
                "add %2, #4 \n"
                "vmlal.s16 q6, d4, d0[0] \n" // (a00-a07) * k00
                "vmlal.s16 q7, d5, d0[0] \n"
                "vmlal.s16 q6, d6, d0[1] \n" // (a10-a17) * k01
                "vmlal.s16 q7, d7, d0[1] \n"
                "vmlal.s16 q6, d8, d0[2] \n" // (a20-a27) * k02
                "vmlal.s16 q7, d9, d0[2] \n"
                "vmlal.s16 q6, d10, d0[3] \n" // (a30-a37) * k03
                "vmlal.s16 q7, d11, d0[3] \n"
                "subs r4, r4, #1 \n"
                "bne 0b \n" // end for
                "1: \n"
                // remain loop
                "and r4, %6, #3 \n" // r4 = remain = inch & 3
                "cmp r4, #0 \n"
                "beq 3f \n"
                "2: \n" // for(; remain != 0; remain--)
                "vld1.s8 {d2}, [%1]! \n" // tmpr a00-a07 a(inch)(data)
                "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch)
                "vmovl.s8 q1, d2 \n"
                "vmovl.s8 q0, d0 \n"
                "add %2, #1 \n"
                "vmlal.s16 q6, d2, d0[0] \n" // (a00-a07) * k00
                "vmlal.s16 q7, d3, d0[0] \n"
                "subs r4, r4, #1 \n"
                "bne 2b \n"
                "3: \n" // store the result to memory
                "vst1.s32 {d12-d15}, [%0]! \n"
                : "=r"(outptr0), // %0
                "=r"(tmpptr), // %1
                "=r"(kptr) // %2
                : "0"(outptr0),
                "1"(tmpptr),
                "2"(kptr),
                "r"(inch) // %6
                : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;
            int sum4 = 0;
            int sum5 = 0;
            int sum6 = 0;
            int sum7 = 0;

            for (int q = 0; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];
                sum4 += tmpptr[4] * kptr[0];
                sum5 += tmpptr[5] * kptr[0];
                sum6 += tmpptr[6] * kptr[0];
                sum7 += tmpptr[7] * kptr[0];

                tmpptr += 8;
                kptr++;
            }

            outptr0[0] = sum0;
            outptr0[1] = sum1;
            outptr0[2] = sum2;
            outptr0[3] = sum3;
            outptr0[4] = sum4;
            outptr0[5] = sum5;
            outptr0[6] = sum6;
            outptr0[7] = sum7;

            outptr0 += 8;
#endif // __ARM_NEON
        }

        // 4 pixels x 1 output channel
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if __ARM_NEON
            asm volatile(
                // inch loop
                "vmov.s32 q6, #0 \n"
                "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
                "cmp r4, #0 \n"
                "beq 1f \n"
                "0: \n" // for(; nn != 0; nn--)
                "pld [%2, #128] \n"
                "vld1.s8 {d4-d5}, [%1]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
                "vmovl.s8 q3, d5 \n" // a20-a23,a30-a33
                "vmovl.s8 q2, d4 \n" // a00-a03,a10-a13
                "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch)
                "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03
                "add %2, #4 \n"
                "vmlal.s16 q6, d4, d0[0] \n" // (a00-a03) * k00
                "vmlal.s16 q6, d5, d0[1] \n" // (a10-a13) * k01
                "vmlal.s16 q6, d6, d0[2] \n" // (a20-a23) * k02
                "vmlal.s16 q6, d7, d0[3] \n" // (a30-a33) * k03
                "subs r4, r4, #1 \n"
                "bne 0b \n" // end for
                "1: \n"
                // remain loop
                "and r4, %6, #3 \n" // r4 = remain = inch & 3
                "cmp r4, #0 \n"
                "beq 3f \n"
                "2: \n" // for(; remain != 0; remain--)
                "vld1.s8 {d2}, [%1] \n" // tmpr a00-a03 a(inch)(data)
                "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch)
                "vmovl.s8 q1, d2 \n"
                "vmovl.s8 q0, d0 \n"
                "add %1, #4 \n"
                "add %2, #1 \n"
                "vmlal.s16 q6, d2, d0[0] \n" // (a00-a03) * k00
                "subs r4, r4, #1 \n"
                "bne 2b \n"
                "3: \n" // store the result to memory
                "vst1.s32 {d12-d13}, [%0]! \n"
                : "=r"(outptr0), // %0
                "=r"(tmpptr), // %1
                "=r"(kptr) // %2
                : "0"(outptr0),
                "1"(tmpptr),
                "2"(kptr),
                "r"(inch) // %6
                : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            for (int q = 0; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];

                tmpptr += 4;
                kptr++;
            }

            outptr0[0] = sum0;
            outptr0[1] = sum1;
            outptr0[2] = sum2;
            outptr0[3] = sum3;

            outptr0 += 4;
#endif // __ARM_NEON
        }

        // 1 pixel x 1 output channel: plain scalar dot product
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);

            int q = 0;
            int sum0 = 0;

            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }

            outptr0[0] = sum0;

            outptr0++;
        }
    }

    //     // NOTE sgemm int8
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         int* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             int sum = 0;
    //
    //             const signed char* kptr = _kernel.channel(p/8 + p%8);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const signed char* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}
static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0");
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
signed char* outptr0 = top_blob.channel(p);
signed char* outptr1 = top_blob.channel(p + 1);
signed char* outptr2 = top_blob.channel(p + 2);
signed char* outptr3 = top_blob.channel(p + 3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p + 1] : 0.f;
const float bias2 = bias ? bias[p + 2] : 0.f;
const float bias3 = bias ? bias[p + 3] : 0.f;
const float scale_requant_in0 = scales_requant[2 * p];
const float scale_requant_out0 = scales_requant[2 * p + 1];
const float scale_requant_in1 = scales_requant[2 * (p + 1)];
const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1];
const float scale_requant_in2 = scales_requant[2 * (p + 2)];
const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1];
const float scale_requant_in3 = scales_requant[2 * (p + 3)];
const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1];
#if __ARM_NEON
float32x4_t _bias03, _scale_in03, _scale_out03;
_bias03[0] = bias0;
_bias03[1] = bias1;
_bias03[2] = bias2;
_bias03[3] = bias3;
_scale_in03[0] = scale_requant_in0;
_scale_in03[1] = scale_requant_in1;
_scale_in03[2] = scale_requant_in2;
_scale_in03[3] = scale_requant_in3;
_scale_out03[0] = scale_requant_out0;
_scale_out03[1] = scale_requant_out1;
_scale_out03[2] = scale_requant_out2;
_scale_out03[3] = scale_requant_out3;
#endif // __ARM_NEON
int i = 0;
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 8);
const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n" // for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d31}, [%4]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d31 \n" // a30-a37
"vmovl.s8 q4, d30 \n" // a20-a27
"vmovl.s8 q15, d29 \n" // a10-a17
"vmovl.s8 q14, d28 \n" // a00-a07
"vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
"vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00-a07) * k00
"vmlal.s16 q7, d29, d0[0] \n"
"vmlal.s16 q8, d28, d0[1] \n" // sum1 = (a00-a07) * k10
"vmlal.s16 q9, d29, d0[1] \n"
"vmlal.s16 q10, d28, d0[2] \n" // sum2 = (a00-a07) * k20
"vmlal.s16 q11, d29, d0[2] \n"
"vmlal.s16 q12, d28, d0[3] \n" // sum3 = (a00-a07) * k30
"vmlal.s16 q13, d29, d0[3] \n"
"vmlal.s16 q6, d30, d1[0] \n" // sum0 += (a10-a17) * k01
"vmlal.s16 q7, d31, d1[0] \n"
"vmlal.s16 q8, d30, d1[1] \n" // sum1 += (a10-a17) * k11
"vmlal.s16 q9, d31, d1[1] \n"
"vmlal.s16 q10, d30, d1[2] \n" // sum2 += (a10-a17) * k21
"vmlal.s16 q11, d31, d1[2] \n"
"vmlal.s16 q12, d30, d1[3] \n" // sum3 += (a10-a17) * k31
"vmlal.s16 q13, d31, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"1: \n"
// remain loop
"and r4, %12, #3 \n" // r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n" // for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n" // tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n" // store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[0] \n"
"vmul.f32 q8, q8, %e17[1] \n"
"vmul.f32 q9, q9, %e17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q14 \n"
"vadd.f32 q8, q8, q15 \n"
"vadd.f32 q9, q9, q15 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
// sum1
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %e18[1] \n"
"vmul.f32 q1, q9, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.8 {d16}, [%1]! \n"
// sum2
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
"vcvt.f32.s32 q11, q11 \n"
"vcvt.f32.s32 q12, q12 \n"
"vcvt.f32.s32 q13, q13 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %f17[0] \n"
"vmul.f32 q11, q11, %f17[0] \n"
"vmul.f32 q12, q12, %f17[1] \n"
"vmul.f32 q13, q13, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, q4 \n"
"vadd.f32 q11, q11, q4 \n"
"vadd.f32 q12, q12, q5 \n"
"vadd.f32 q13, q13, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %f18[0] \n"
"vmul.f32 q1, q11, %f18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d20, q0 \n"
"vqmovn.s32 d21, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d20, q10 \n"
// save top_s8
"vst1.8 {d20}, [%2]! \n"
// sum3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q12, %f18[1] \n"
"vmul.f32 q1, q13, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d24, q0 \n"
"vqmovn.s32 d25, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d24, q12 \n"
// save top_s8
"vst1.8 {d24}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q = 0; q < inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[4] = float2int8(((float)sum0_4 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[5] = float2int8(((float)sum0_5 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[6] = float2int8(((float)sum0_6 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[7] = float2int8(((float)sum0_7 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[4] = float2int8(((float)sum1_4 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[5] = float2int8(((float)sum1_5 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[6] = float2int8(((float)sum1_6 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[7] = float2int8(((float)sum1_7 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[4] = float2int8(((float)sum2_4 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[5] = float2int8(((float)sum2_5 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[6] = float2int8(((float)sum2_6 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[7] = float2int8(((float)sum2_7 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[4] = float2int8(((float)sum3_4 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[5] = float2int8(((float)sum3_5 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[6] = float2int8(((float)sum3_6 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[7] = float2int8(((float)sum3_7 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n" // for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d29}, [%4]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q15, d29 \n" // a20-a23,a30-a33
"vmovl.s8 q14, d28 \n" // a00-a03,a10-a13
"vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
"vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00-a03) * k00
"vmlal.s16 q7, d28, d0[1] \n" // sum1 = (a00-a03) * k10
"vmlal.s16 q8, d28, d0[2] \n" // sum2 = (a00-a03) * k20
"vmlal.s16 q9, d28, d0[3] \n" // sum3 = (a00-a03) * k30
"vmlal.s16 q6, d29, d1[0] \n" // sum0 += (a10-a13) * k01
"vmlal.s16 q7, d29, d1[1] \n" // sum1 += (a10-a13) * k11
"vmlal.s16 q8, d29, d1[2] \n" // sum2 += (a10-a13) * k21
"vmlal.s16 q9, d29, d1[3] \n" // sum3 += (a10-a13) * k31
"vmlal.s16 q6, d30, d2[0] \n" // sum0 += (a20-a23) * k02
"vmlal.s16 q7, d30, d2[1] \n" // sum1 += (a20-a23) * k12
"vmlal.s16 q8, d30, d2[2] \n" // sum2 += (a20-a23) * k22
"vmlal.s16 q9, d30, d2[3] \n" // sum3 += (a20-a23) * k32
"vmlal.s16 q6, d31, d3[0] \n" // sum0 += (a30-a33) * k03
"vmlal.s16 q7, d31, d3[1] \n" // sum1 += (a30-a33) * k13
"vmlal.s16 q8, d31, d3[2] \n" // sum2 += (a30-a33) * k23
"vmlal.s16 q9, d31, d3[3] \n" // sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"1: \n"
// remain loop
"and r4, %12, #3 \n" // r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n" // for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n" // tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n" // store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0-1
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[1] \n"
"vmul.f32 q8, q8, %f17[0] \n"
"vmul.f32 q9, q9, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.s32 {d12[0]}, [%0]! \n"
"vst1.s32 {d12[1]}, [%1]! \n"
// sum2-3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %f18[0] \n"
"vmul.f32 q1, q9, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.s32 {d16[0]}, [%2]! \n"
"vst1.s32 {d16[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q = 0; q < inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const signed char* kptr = kernel.channel(p / 4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n" // for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n" // tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n" // a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n" // k02-k32,k03-k33
"vmovl.s8 q0, d0 \n" // k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n" // (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n" // (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n" // (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n" // (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n" // r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n" // for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n" // store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, %q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12[0]}, [%0]! \n"
"vst1.8 {d12[1]}, [%1]! \n"
"vst1.8 {d12[2]}, [%2]! \n"
"vst1.8 {d12[3]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"w"(_bias03), // %13
"w"(_scale_in03), // %14
"w"(_scale_out03) // %15
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q = 0; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
signed char* outptr0 = out0;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2 * p];
const float scale_requant_out = scales_requant[2 * p + 1];
#if __ARM_NEON
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);
#endif // __ARM_NEON
int i = 0;
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 8);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n" // for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n" // a30-a37
"vmovl.s8 q4, d6 \n" // a20-a27
"vmovl.s8 q3, d5 \n" // a10-a17
"vmovl.s8 q2, d4 \n" // a00-a07
"vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n" // k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n" // (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n" // (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n" // (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n" // (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"1: \n"
// remain loop
"and r4, %6, #3 \n" // r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n" // for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n" // tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n" // (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n" // store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
"vmul.f32 q7, q7, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
"vadd.f32 q7, q7, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
"vmul.f32 q1, q7, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q = 0; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
outptr0[4] = float2int8(((float)sum4 * scale_requant_in + bias0) * scale_requant_out);
outptr0[5] = float2int8(((float)sum5 * scale_requant_in + bias0) * scale_requant_out);
outptr0[6] = float2int8(((float)sum6 * scale_requant_in + bias0) * scale_requant_out);
outptr0[7] = float2int8(((float)sum7 * scale_requant_in + bias0) * scale_requant_out);
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n" // r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n" // for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n" // a20-a23,a30-a33
"vmovl.s8 q2, d4 \n" // a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n" // k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n" // (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n" // (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n" // (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n" // (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"1: \n"
// remain loop
"and r4, %6, #3 \n" // r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n" // for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n" // tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n" // (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n" // store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
"vst1.s32 {d12[0]}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q = 0; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
int q = 0;
int sum0 = 0;
for (; q < inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
outptr0++;
}
}
}
#endif
|
GB_binop__second_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int8)
// A*D function (colscale): GB (_AxD__second_int8)
// D*A function (rowscale): GB (_DxB__second_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_int8)
// C=A'+scalar GB (_bind2nd_tran__second_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__second_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of patterns, with the
// SECOND(int8) operator applied where both A and B have an entry.
// All slicing workspace is declared here and freed by GB_FREE_WORK; the
// traversal logic lives in the included template.
GrB_Info GB (_AaddB__second_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix ek-slicing workspace, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B over the pattern
// intersection, with SECOND(int8) (result takes B's value).
// The whole algorithm is expanded from the included meta template.
GrB_Info GB (_AemultB_01__second_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full, SECOND(int8) operator.  When the operator has no flipped
// variant (GB_BINOP_FLIP), flipxy selects which argument order the template
// uses via the GB_FLIPPED macro.
GrB_Info GB (_AemultB_02__second_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, SECOND(int8) operator.  Work is sliced over M
// (M_ek_slicing); the loop body is expanded from the included template.
GrB_Info GB (_AemultB_03__second_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap: C=A.*B, C<M>=A.*B, or
// C<!M>=A.*B, SECOND(int8) operator.  ewise_method selects the sub-case
// inside the included bitmap template.
GrB_Info GB (_AemultB_bitmap__second_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st kernel, disabled (#if 0): Cx = second(x, Bx) = Bx, i.e. a plain
// copy of B's values, so the generator appears to have omitted this variant
// as unnecessary — TODO confirm against the generator's rules.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
// bound scalar x is unused by SECOND, but still unpacked by the generator
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests B's bitmap; skip entries not present
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = second(Ax [p], y) = y for every entry present in A,
// i.e. broadcast the scalar y into C's pattern.  The `; ;` below is a
// placeholder emitted by the code generator where the (unused) load of
// Ax [p] would normally go.
GrB_Info GB (_bind2nd__second_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
// the bound second argument, applied to every present entry
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests A's bitmap; skip entries not present
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// bind1st + transpose kernel, disabled (#if 0): C = second(x, A') = A'
// values copied through GB_CAST_OP during the transpose; omitted by the
// generator, presumably because it reduces to a plain typed transpose —
// TODO confirm.
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry action used by GB_unop_transpose.c:
// for SECOND the transposed A value is ignored (the `; ;` placeholder)
// and the bound scalar y is stored instead.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// bind2nd + transpose: C = second(A', y), i.e. C has A-transpose's pattern
// with every entry equal to the scalar y.
GrB_Info GB (_bind2nd_tran__second_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar consumed by GB_CAST_OP inside the transpose template
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
define-directive.c | //test expansion of define directives
// Zeroes a 10x10 array in parallel.  This is compiler-test input: the point
// of the file is whether MY_SMP_PRIVATE is macro-expanded inside the
// #pragma omp operand (expansion in pragma operands is the behavior under
// test), so every token here is deliberate — do not "clean up".
void foo(int a[10][10])
{
int i;
int j;
#define MY_SMP_PRIVATE i,j
#pragma omp parallel private (MY_SMP_PRIVATE)
#pragma omp for
for (j =0; j< 10; j++)
for (i =0; i< 10; i++)
a[i][j] = 0;
}
|
__clang_hip_cmath.h | /*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __CLANG_HIP_CMATH_H__
#define __CLANG_HIP_CMATH_H__
#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <limits>
#include <type_traits>
#include <utility>
#endif
#include <limits.h>
#include <stdint.h>
#endif // !defined(__HIPCC_RTC__)
#pragma push_macro("__DEVICE__")
#pragma push_macro("__CONSTEXPR__")
#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static __attribute__((always_inline, nothrow))
#define __CONSTEXPR__ constexpr
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#define __CONSTEXPR__
#endif // __OPENMP_AMDGCN__
// Start with functions that cannot be defined by DEF macros below.
#if defined(__cplusplus)
#if defined __OPENMP_AMDGCN__
// OpenMP AMDGCN needs float versions of fabs/sin/cos spelled without the
// 'f' suffix; each forwards to the suffixed C function.
__DEVICE__ __CONSTEXPR__ float fabs(float __x) { return ::fabsf(__x); }
__DEVICE__ __CONSTEXPR__ float sin(float __x) { return ::sinf(__x); }
__DEVICE__ __CONSTEXPR__ float cos(float __x) { return ::cosf(__x); }
#endif
// C++-style abs overloads, each forwarding to the matching C function.
__DEVICE__ __CONSTEXPR__ double abs(double __x) { return ::fabs(__x); }
__DEVICE__ __CONSTEXPR__ float abs(float __x) { return ::fabsf(__x); }
__DEVICE__ __CONSTEXPR__ long long abs(long long __n) { return ::llabs(__n); }
__DEVICE__ __CONSTEXPR__ long abs(long __n) { return ::labs(__n); }
// float overload of fma, forwarding to fmaf
__DEVICE__ __CONSTEXPR__ float fma(float __x, float __y, float __z) {
return ::fmaf(__x, __y, __z);
}
#if !defined(__HIPCC_RTC__)
// The value returned by fpclassify is platform dependent, therefore it is not
// supported by hipRTC.
// Classify a float/double as FP_NAN / FP_INFINITE / FP_NORMAL /
// FP_SUBNORMAL / FP_ZERO via the compiler builtin.
__DEVICE__ __CONSTEXPR__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
__DEVICE__ __CONSTEXPR__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
#endif // !defined(__HIPCC_RTC__)
// float overload of frexp, forwarding to frexpf
__DEVICE__ __CONSTEXPR__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
#if defined(__OPENMP_AMDGCN__)
// For OpenMP we work around some old system headers that have non-conforming
// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
// this by providing two versions of these functions, differing only in the
// return type. To avoid conflicting definitions we disable implicit base
// function generation. That means we will end up with two specializations, one
// per type, but only one has a base function defined by the system header.
#pragma omp begin declare variant match( \
implementation = {extension(disable_implicit_base)})
// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
// add a suffix. This means we would clash with the names of the variants
// (note that we do not create implicit base functions here). To avoid
// this clash we add a new trait to some of them that is always true
// (this is LLVM after all ;)). It will only influence the mangled name
// of the variants inside the inner region and avoid the clash.
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
// int-returning variants, matching the legacy system-header signatures
__DEVICE__ __CONSTEXPR__ int isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ __CONSTEXPR__ int isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ __CONSTEXPR__ int isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ __CONSTEXPR__ int isfinite(double __x) { return ::__finite(__x); }
__DEVICE__ __CONSTEXPR__ int isnan(float __x) { return ::__isnanf(__x); }
__DEVICE__ __CONSTEXPR__ int isnan(double __x) { return ::__isnan(__x); }
#pragma omp end declare variant
#endif // defined(__OPENMP_AMDGCN__)
// standard bool-returning classification functions
__DEVICE__ __CONSTEXPR__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ __CONSTEXPR__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ __CONSTEXPR__ bool isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ __CONSTEXPR__ bool isfinite(double __x) { return ::__finite(__x); }
__DEVICE__ __CONSTEXPR__ bool isnan(float __x) { return ::__isnanf(__x); }
__DEVICE__ __CONSTEXPR__ bool isnan(double __x) { return ::__isnan(__x); }
#if defined(__OPENMP_AMDGCN__)
#pragma omp end declare variant
#endif // defined(__OPENMP_AMDGCN__)
// Quiet (non-signaling) floating-point comparisons: each forwards to the
// corresponding compiler builtin, which never raises FE_INVALID on NaN.
__DEVICE__ __CONSTEXPR__ bool isgreater(float __x, float __y) {
return __builtin_isgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreater(double __x, double __y) {
return __builtin_isgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreaterequal(float __x, float __y) {
return __builtin_isgreaterequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreaterequal(double __x, double __y) {
return __builtin_isgreaterequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isless(float __x, float __y) {
return __builtin_isless(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isless(double __x, double __y) {
return __builtin_isless(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessequal(float __x, float __y) {
return __builtin_islessequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessequal(double __x, double __y) {
return __builtin_islessequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessgreater(float __x, float __y) {
return __builtin_islessgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessgreater(double __x, double __y) {
return __builtin_islessgreater(__x, __y);
}
// true iff __x is a normal (not zero/subnormal/inf/NaN) value
__DEVICE__ __CONSTEXPR__ bool isnormal(float __x) {
return __builtin_isnormal(__x);
}
__DEVICE__ __CONSTEXPR__ bool isnormal(double __x) {
return __builtin_isnormal(__x);
}
// true iff either argument is NaN
__DEVICE__ __CONSTEXPR__ bool isunordered(float __x, float __y) {
return __builtin_isunordered(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isunordered(double __x, double __y) {
return __builtin_isunordered(__x, __y);
}
// float overload of modf, forwarding to modff
__DEVICE__ __CONSTEXPR__ float modf(float __x, float *__iptr) {
return ::modff(__x, __iptr);
}
// pow with an integer exponent maps to the faster powi/powif intrinsics
__DEVICE__ __CONSTEXPR__ float pow(float __base, int __iexp) {
return ::powif(__base, __iexp);
}
__DEVICE__ __CONSTEXPR__ double pow(double __base, int __iexp) {
return ::powi(__base, __iexp);
}
// float overloads forwarding to the suffixed C functions
__DEVICE__ __CONSTEXPR__ float remquo(float __x, float __y, int *__quo) {
return ::remquof(__x, __y, __quo);
}
__DEVICE__ __CONSTEXPR__ float scalbln(float __x, long int __n) {
return ::scalblnf(__x, __n);
}
// sign-bit extraction (true for negative values, including -0.0)
__DEVICE__ __CONSTEXPR__ bool signbit(float __x) { return ::__signbitf(__x); }
__DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Notably missing above is nexttoward. We omit it because
// ocml doesn't provide an implementation, and we don't want to be in the
// business of implementing tricky libm functions in this header.
// Other functions.
// _Float16 overloads, forwarding to the OCML device-math library
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
return __ocml_fma_f16(__x, __y, __z);
}
// integer-exponent pow for _Float16 via OCML's pown
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
}
#ifndef __OPENMP_AMDGCN__
// BEGIN DEF_FUN and HIP_OVERLOAD
// BEGIN DEF_FUN
// Helper macros that stamp out the float overload of each cmath function by
// forwarding to its 'f'-suffixed C counterpart.  Saved/restored with
// push_macro/pop_macro so user code is unaffected.
#pragma push_macro("__DEF_FUN1")
#pragma push_macro("__DEF_FUN2")
#pragma push_macro("__DEF_FUN2_FI")
// Define cmath functions with float argument and returns __retty.
#define __DEF_FUN1(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x) { return __func##f(__x); }
// Define cmath functions with two float arguments and returns __retty.
#define __DEF_FUN2(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x, float __y) { \
return __func##f(__x, __y); \
}
// Define cmath functions with a float and an int argument and returns __retty.
#define __DEF_FUN2_FI(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x, int __y) { \
return __func##f(__x, __y); \
}
// One line per generated float overload, e.g. float acos(float) -> acosf.
__DEF_FUN1(float, acos)
__DEF_FUN1(float, acosh)
__DEF_FUN1(float, asin)
__DEF_FUN1(float, asinh)
__DEF_FUN1(float, atan)
__DEF_FUN2(float, atan2)
__DEF_FUN1(float, atanh)
__DEF_FUN1(float, cbrt)
__DEF_FUN1(float, ceil)
__DEF_FUN2(float, copysign)
__DEF_FUN1(float, cos)
__DEF_FUN1(float, cosh)
__DEF_FUN1(float, erf)
__DEF_FUN1(float, erfc)
__DEF_FUN1(float, exp)
__DEF_FUN1(float, exp2)
__DEF_FUN1(float, expm1)
__DEF_FUN1(float, fabs)
__DEF_FUN2(float, fdim)
__DEF_FUN1(float, floor)
__DEF_FUN2(float, fmax)
__DEF_FUN2(float, fmin)
__DEF_FUN2(float, fmod)
__DEF_FUN2(float, hypot)
__DEF_FUN1(int, ilogb)
__DEF_FUN2_FI(float, ldexp)
__DEF_FUN1(float, lgamma)
__DEF_FUN1(float, log)
__DEF_FUN1(float, log10)
__DEF_FUN1(float, log1p)
__DEF_FUN1(float, log2)
__DEF_FUN1(float, logb)
__DEF_FUN1(long long, llrint)
__DEF_FUN1(long long, llround)
__DEF_FUN1(long, lrint)
__DEF_FUN1(long, lround)
__DEF_FUN1(float, nearbyint)
__DEF_FUN2(float, nextafter)
__DEF_FUN2(float, pow)
__DEF_FUN2(float, remainder)
__DEF_FUN1(float, rint)
__DEF_FUN1(float, round)
__DEF_FUN2_FI(float, scalbn)
__DEF_FUN1(float, sin)
__DEF_FUN1(float, sinh)
__DEF_FUN1(float, sqrt)
__DEF_FUN1(float, tan)
__DEF_FUN1(float, tanh)
__DEF_FUN1(float, tgamma)
__DEF_FUN1(float, trunc)
#pragma pop_macro("__DEF_FUN1")
#pragma pop_macro("__DEF_FUN2")
#pragma pop_macro("__DEF_FUN2_FI")
// END DEF_FUN
// BEGIN HIP_OVERLOAD
#pragma push_macro("__HIP_OVERLOAD1")
#pragma push_macro("__HIP_OVERLOAD2")
// __hip_enable_if::type is a type function which returns __T if __B is true.
// Minimal enable_if: the primary template has no members, while the
// partial specialization for __B == true exposes `type` (= __T), so
// substitution fails exactly when the condition is false.
template <bool __B, class __T = void> struct __hip_enable_if {};
// True case: provide the nested typedef via a using-alias.
template <class __T> struct __hip_enable_if<true, __T> { using type = __T; };
// Self-contained type traits used by the overload machinery below; defined
// locally (instead of using <type_traits>) so the header also works where
// the host standard library is unavailable, e.g. under hipRTC.
namespace __hip {
// is_integral<T>::value == 1 for the built-in integral types, else 0
template <class _Tp> struct is_integral {
enum { value = 0 };
};
template <> struct is_integral<bool> {
enum { value = 1 };
};
template <> struct is_integral<char> {
enum { value = 1 };
};
template <> struct is_integral<signed char> {
enum { value = 1 };
};
template <> struct is_integral<unsigned char> {
enum { value = 1 };
};
template <> struct is_integral<wchar_t> {
enum { value = 1 };
};
template <> struct is_integral<short> {
enum { value = 1 };
};
template <> struct is_integral<unsigned short> {
enum { value = 1 };
};
template <> struct is_integral<int> {
enum { value = 1 };
};
template <> struct is_integral<unsigned int> {
enum { value = 1 };
};
template <> struct is_integral<long> {
enum { value = 1 };
};
template <> struct is_integral<unsigned long> {
enum { value = 1 };
};
template <> struct is_integral<long long> {
enum { value = 1 };
};
template <> struct is_integral<unsigned long long> {
enum { value = 1 };
};
// ToDo: specializes is_arithmetic<_Float16>
// is_arithmetic<T>::value == 1 for integral and floating types, else 0
template <class _Tp> struct is_arithmetic {
enum { value = 0 };
};
template <> struct is_arithmetic<bool> {
enum { value = 1 };
};
template <> struct is_arithmetic<char> {
enum { value = 1 };
};
template <> struct is_arithmetic<signed char> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned char> {
enum { value = 1 };
};
template <> struct is_arithmetic<wchar_t> {
enum { value = 1 };
};
template <> struct is_arithmetic<short> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned short> {
enum { value = 1 };
};
template <> struct is_arithmetic<int> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned int> {
enum { value = 1 };
};
template <> struct is_arithmetic<long> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned long> {
enum { value = 1 };
};
template <> struct is_arithmetic<long long> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned long long> {
enum { value = 1 };
};
template <> struct is_arithmetic<float> {
enum { value = 1 };
};
template <> struct is_arithmetic<double> {
enum { value = 1 };
};
// local true_type/false_type (value lives in __constant__ device memory)
struct true_type {
static const __constant__ bool value = true;
};
struct false_type {
static const __constant__ bool value = false;
};
// is_same<T,U> via partial specialization
template <typename __T, typename __U> struct is_same : public false_type {};
template <typename __T> struct is_same<__T, __T> : public true_type {};
// unevaluated-context declval, as in std::declval
template <typename __T> struct add_rvalue_reference { typedef __T &&type; };
template <typename __T> typename add_rvalue_reference<__T>::type declval();
// decltype is only available in C++11 and above.
#if __cplusplus >= 201103L
// __hip_promote
// __numeric_type<T>: the type T promotes to in arithmetic, determined by
// overload resolution on the __test set below (void means "not numeric").
template <class _Tp> struct __numeric_type {
static void __test(...);
static _Float16 __test(_Float16);
static float __test(float);
static double __test(char);
static double __test(int);
static double __test(unsigned);
static double __test(long);
static double __test(unsigned long);
static double __test(long long);
static double __test(unsigned long long);
static double __test(double);
// No support for long double, use double instead.
static double __test(long double);
typedef decltype(__test(declval<_Tp>())) type;
static const bool value = !is_same<type, void>::value;
};
template <> struct __numeric_type<void> { static const bool value = true; };
// __promote<_A1,_A2,_A3>: common arithmetic type of up to three operands,
// computed as decltype of their sum; ::value is false if any is non-numeric.
template <class _A1, class _A2 = void, class _A3 = void,
bool = __numeric_type<_A1>::value &&__numeric_type<_A2>::value
&&__numeric_type<_A3>::value>
class __promote_imp {
public:
static const bool value = false;
};
template <class _A1, class _A2, class _A3>
class __promote_imp<_A1, _A2, _A3, true> {
private:
typedef typename __promote_imp<_A1>::type __type1;
typedef typename __promote_imp<_A2>::type __type2;
typedef typename __promote_imp<_A3>::type __type3;
public:
typedef decltype(__type1() + __type2() + __type3()) type;
static const bool value = true;
};
template <class _A1, class _A2> class __promote_imp<_A1, _A2, void, true> {
private:
typedef typename __promote_imp<_A1>::type __type1;
typedef typename __promote_imp<_A2>::type __type2;
public:
typedef decltype(__type1() + __type2()) type;
static const bool value = true;
};
template <class _A1> class __promote_imp<_A1, void, void, true> {
public:
typedef typename __numeric_type<_A1>::type type;
static const bool value = true;
};
template <class _A1, class _A2 = void, class _A3 = void>
class __promote : public __promote_imp<_A1, _A2, _A3> {};
#endif //__cplusplus >= 201103L
} // namespace __hip
// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
// avoid compilation error due to ambibuity. e.g. floor(5) is resolved with
// floor(double).
// __HIP_OVERLOAD1: generate a template taking any integral type, casting it
// to double and calling the double overload of __fn.
#define __HIP_OVERLOAD1(__retty, __fn) \
template <typename __T> \
__DEVICE__ __CONSTEXPR__ \
typename __hip_enable_if<__hip::is_integral<__T>::value, __retty>::type \
__fn(__T __x) { \
return ::__fn((double)__x); \
}
// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
// or integer argument to avoid compilation error due to ambibuity. e.g.
// max(5.0f, 6.0) is resolved with max(double, double).
#if __cplusplus >= 201103L
// C++11+: return the promoted common type of the two arguments.
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
__DEVICE__ __CONSTEXPR__ typename __hip_enable_if< \
__hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value, \
typename __hip::__promote<__T1, __T2>::type>::type \
__fn(__T1 __x, __T2 __y) { \
typedef typename __hip::__promote<__T1, __T2>::type __result_type; \
return __fn((__result_type)__x, (__result_type)__y); \
}
#else
// pre-C++11 fallback: always compute in double and return __retty.
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
__DEVICE__ __CONSTEXPR__ \
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value && \
__hip::is_arithmetic<__T2>::value, \
__retty>::type \
__fn(__T1 __x, __T2 __y) { \
return __fn((double)__x, (double)__y); \
}
#endif
// One line per generated overload: integral (OVERLOAD1) or mixed-arithmetic
// (OVERLOAD2) arguments resolve to the double implementation.
__HIP_OVERLOAD1(double, acos)
__HIP_OVERLOAD1(double, acosh)
__HIP_OVERLOAD1(double, asin)
__HIP_OVERLOAD1(double, asinh)
__HIP_OVERLOAD1(double, atan)
__HIP_OVERLOAD2(double, atan2)
__HIP_OVERLOAD1(double, atanh)
__HIP_OVERLOAD1(double, cbrt)
__HIP_OVERLOAD1(double, ceil)
__HIP_OVERLOAD2(double, copysign)
__HIP_OVERLOAD1(double, cos)
__HIP_OVERLOAD1(double, cosh)
__HIP_OVERLOAD1(double, erf)
__HIP_OVERLOAD1(double, erfc)
__HIP_OVERLOAD1(double, exp)
__HIP_OVERLOAD1(double, exp2)
__HIP_OVERLOAD1(double, expm1)
__HIP_OVERLOAD1(double, fabs)
__HIP_OVERLOAD2(double, fdim)
__HIP_OVERLOAD1(double, floor)
__HIP_OVERLOAD2(double, fmax)
__HIP_OVERLOAD2(double, fmin)
__HIP_OVERLOAD2(double, fmod)
#if !defined(__HIPCC_RTC__)
__HIP_OVERLOAD1(int, fpclassify)
#endif // !defined(__HIPCC_RTC__)
__HIP_OVERLOAD2(double, hypot)
__HIP_OVERLOAD1(int, ilogb)
__HIP_OVERLOAD1(bool, isfinite)
__HIP_OVERLOAD2(bool, isgreater)
__HIP_OVERLOAD2(bool, isgreaterequal)
__HIP_OVERLOAD1(bool, isinf)
__HIP_OVERLOAD2(bool, isless)
__HIP_OVERLOAD2(bool, islessequal)
__HIP_OVERLOAD2(bool, islessgreater)
__HIP_OVERLOAD1(bool, isnan)
__HIP_OVERLOAD1(bool, isnormal)
__HIP_OVERLOAD2(bool, isunordered)
__HIP_OVERLOAD1(double, lgamma)
__HIP_OVERLOAD1(double, log)
__HIP_OVERLOAD1(double, log10)
__HIP_OVERLOAD1(double, log1p)
__HIP_OVERLOAD1(double, log2)
__HIP_OVERLOAD1(double, logb)
__HIP_OVERLOAD1(long long, llrint)
__HIP_OVERLOAD1(long long, llround)
__HIP_OVERLOAD1(long, lrint)
__HIP_OVERLOAD1(long, lround)
__HIP_OVERLOAD1(double, nearbyint)
__HIP_OVERLOAD2(double, nextafter)
__HIP_OVERLOAD2(double, pow)
__HIP_OVERLOAD2(double, remainder)
__HIP_OVERLOAD1(double, rint)
__HIP_OVERLOAD1(double, round)
__HIP_OVERLOAD1(bool, signbit)
__HIP_OVERLOAD1(double, sin)
__HIP_OVERLOAD1(double, sinh)
__HIP_OVERLOAD1(double, sqrt)
__HIP_OVERLOAD1(double, tan)
__HIP_OVERLOAD1(double, tanh)
__HIP_OVERLOAD1(double, tgamma)
__HIP_OVERLOAD1(double, trunc)
// Overload these but don't add them to std, they are not part of cmath.
__HIP_OVERLOAD2(double, max)
__HIP_OVERLOAD2(double, min)
// Additional Overloads that don't quite match HIP_OVERLOAD.
#if __cplusplus >= 201103L
// Three-argument fma for any mix of arithmetic types: promote all three to
// their common type, then call the matching ::fma overload.
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ __CONSTEXPR__ typename __hip_enable_if<
__hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value &&
__hip::is_arithmetic<__T3>::value,
typename __hip::__promote<__T1, __T2, __T3>::type>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;
return ::fma((__result_type)__x, (__result_type)__y, (__result_type)__z);
}
#else
// pre-C++11 fallback: compute in double
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value &&
__hip::is_arithmetic<__T3>::value,
double>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
return ::fma((double)__x, (double)__y, (double)__z);
}
#endif
// Integral-argument overloads that can't be generated by __HIP_OVERLOAD1
// because of their extra out/int parameter; each casts to double and
// forwards to the double implementation.
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
frexp(__T __x, int *__exp) {
return ::frexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
ldexp(__T __x, int __exp) {
return ::ldexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
modf(__T __x, double *__exp) {
return ::modf((double)__x, __exp);
}
#if __cplusplus >= 201103L
// remquo for mixed arithmetic arguments: promote both to the common type
// and forward (the quotient out-parameter passes through unchanged).
template <typename __T1, typename __T2>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value,
typename __hip::__promote<__T1, __T2>::type>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
typedef typename __hip::__promote<__T1, __T2>::type __result_type;
return ::remquo((__result_type)__x, (__result_type)__y, __quo);
}
#else
// pre-C++11 fallback: compute in double
template <typename __T1, typename __T2>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value,
double>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
return ::remquo((double)__x, (double)__y, __quo);
}
#endif
// Integral-argument scalbln/scalbn: cast to double and forward (the exponent
// parameter type differs from the generated-overload pattern).
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbln(__T __x, long int __exp) {
return ::scalbln((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbn(__T __x, int __exp) {
return ::scalbn((double)__x, __exp);
}
#pragma pop_macro("__HIP_OVERLOAD1")
#pragma pop_macro("__HIP_OVERLOAD2")
// END HIP_OVERLOAD
// END DEF_FUN and HIP_OVERLOAD
#endif // ifndef __OPENMP_AMDGCN__
#endif // defined(__cplusplus)
#ifndef __OPENMP_AMDGCN__
// Define these overloads inside the namespace our standard library uses.
#if !defined(__HIPCC_RTC__)
#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_BEGIN_NAMESPACE_STD
#else
namespace std {
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif // _LIBCPP_BEGIN_NAMESPACE_STD
// Pull the new overloads we defined above into namespace std.
// using ::abs; - This may be considered for C++.
using ::acos;
using ::acosh;
using ::asin;
using ::asinh;
using ::atan;
using ::atan2;
using ::atanh;
using ::cbrt;
using ::ceil;
using ::copysign;
using ::cos;
using ::cosh;
using ::erf;
using ::erfc;
using ::exp;
using ::exp2;
using ::expm1;
using ::fabs;
using ::fdim;
using ::floor;
using ::fma;
using ::fmax;
using ::fmin;
using ::fmod;
using ::fpclassify;
using ::frexp;
using ::hypot;
using ::ilogb;
using ::isfinite;
using ::isgreater;
using ::isgreaterequal;
using ::isless;
using ::islessequal;
using ::islessgreater;
using ::isnormal;
using ::isunordered;
using ::ldexp;
using ::lgamma;
using ::llrint;
using ::llround;
using ::log;
using ::log10;
using ::log1p;
using ::log2;
using ::logb;
using ::lrint;
using ::lround;
using ::modf;
// using ::nan; - This may be considered for C++.
// using ::nanf; - This may be considered for C++.
// using ::nanl; - This is not yet defined.
using ::nearbyint;
using ::nextafter;
// using ::nexttoward; - Omit this since we do not have a definition.
using ::pow;
using ::remainder;
using ::remquo;
using ::rint;
using ::round;
using ::scalbln;
using ::scalbn;
using ::signbit;
using ::sin;
using ::sinh;
using ::sqrt;
using ::tan;
using ::tanh;
using ::tgamma;
using ::trunc;
// Well this is fun: We need to pull these symbols in for libc++, but we can't
// pull them in with libstdc++, because its ::isinf and ::isnan are different
// than its std::isinf and std::isnan.
#ifndef __GLIBCXX__
using ::isinf;
using ::isnan;
#endif
// Finally, pull the "foobarf" functions that HIP defines into std.
using ::acosf;
using ::acoshf;
using ::asinf;
using ::asinhf;
using ::atan2f;
using ::atanf;
using ::atanhf;
using ::cbrtf;
using ::ceilf;
using ::copysignf;
using ::cosf;
using ::coshf;
using ::erfcf;
using ::erff;
using ::exp2f;
using ::expf;
using ::expm1f;
using ::fabsf;
using ::fdimf;
using ::floorf;
using ::fmaf;
using ::fmaxf;
using ::fminf;
using ::fmodf;
using ::frexpf;
using ::hypotf;
using ::ilogbf;
using ::ldexpf;
using ::lgammaf;
using ::llrintf;
using ::llroundf;
using ::log10f;
using ::log1pf;
using ::log2f;
using ::logbf;
using ::logf;
using ::lrintf;
using ::lroundf;
using ::modff;
using ::nearbyintf;
using ::nextafterf;
// using ::nexttowardf; - Omit this since we do not have a definition.
using ::powf;
using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
using ::scalblnf;
using ::scalbnf;
using ::sinf;
using ::sinhf;
using ::sqrtf;
using ::tanf;
using ::tanhf;
using ::tgammaf;
using ::truncf;
#ifdef _LIBCPP_END_NAMESPACE_STD
_LIBCPP_END_NAMESPACE_STD
#else
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_END_NAMESPACE_VERSION
#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
} // namespace std
#endif // _LIBCPP_END_NAMESPACE_STD
#endif // !defined(__HIPCC_RTC__)
// Define device-side math functions from <ymath.h> on MSVC.
#if !defined(__HIPCC_RTC__)
#if defined(_MSC_VER)
// Before VS2019, `<ymath.h>` is also included in `<limits>` and other headers.
// But, from VS2019, it's only included in `<complex>`. Need to include
// `<ymath.h>` here to ensure C functions declared there won't be markded as
// `__host__` and `__device__` through `<complex>` wrapper.
#include <ymath.h>
#if defined(__cplusplus)
extern "C" {
#endif // defined(__cplusplus)
// Device-side implementations of MSVC <ymath.h> internals: _Cosh/_Sinh
// compute cosh(x)*y / sinh(x)*y (the scaled forms MSVC's <complex> uses),
// and _Dtest/_FDtest return the fpclassify result as a short.
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Cosh(double x,
double y) {
return cosh(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FCosh(float x,
float y) {
return coshf(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _Dtest(double *p) {
return fpclassify(*p);
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _FDtest(float *p) {
return fpclassify(*p);
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Sinh(double x,
double y) {
return sinh(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FSinh(float x,
float y) {
return sinhf(x) * y;
}
#if defined(__cplusplus)
}
#endif // defined(__cplusplus)
#endif // defined(_MSC_VER)
#endif // !defined(__HIPCC_RTC__)
#endif // ifndef __OPENMP_AMDGCN__
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__CONSTEXPR__")
#endif // __CLANG_HIP_CMATH_H__
|
gen_matrices.c | /**
* Copyright (c) 2016, Kevin Lewi
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "gen_matrices.h"
/**
* Main function
*
* Must be called with the parameters n, p, and a seed, separated by spaces
*
*/
/**
 * Entry point. Expects four command-line arguments:
 *   n         - matrix dimension
 *   p         - modulus (decimal string)
 *   simulated - nonzero to use a simulated (random) det/adjugate
 *   seed      - seed string for the cryptographic RNG
 *
 * Returns 0 on success, 1 on bad usage.
 */
int main(int argc, char *argv[]) {
    // BUG FIX: previously argv[1..4] were dereferenced without checking argc,
    // which is undefined behavior when fewer arguments are supplied.
    if (argc < 5) {
        fprintf(stderr, "usage: %s n p simulated seed\n", argv[0]);
        return 1;
    }
    print_random_matrices_with_adj(argv[1], argv[2], argv[3], argv[4]);
    return 0;
}
/*
 * a = (b * c) mod p for n x n matrices: full integer product followed by an
 * entrywise reduction of the result modulo p.
 */
void fmpz_mat_mul_modp(fmpz_mat_t a, fmpz_mat_t b, fmpz_mat_t c, int n,
                       fmpz_t p) {
    fmpz_mat_mul(a, b, c);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            fmpz *entry = fmpz_mat_entry(a, row, col);
            fmpz_mod(entry, entry, p);
        }
    }
}
/**
 * Generates a random n x n matrix mod p, obtains its determinant and
 * (transposed) adjugate — either computed for real or drawn at random when
 * `simulated` is nonzero — verifies a * adj == det * I for the real case,
 * and prints det, the matrix, and the adjugate to stdout.
 *
 * @param n_str      matrix dimension, as a decimal string
 * @param p_str      modulus, as a decimal string
 * @param simulated  "0" for a real setup, nonzero for a simulated one
 * @param seed       RNG seed string (may be NULL)
 */
void print_random_matrices_with_adj(char *n_str, char *p_str, char *simulated,
                                    char *seed) {
    int n = atoi(n_str);
    int is_simulated_setup = atoi(simulated);
    // Guard before any allocation: atoi yields 0 on garbage input.
    if (n <= 0) {
        fprintf(stderr, "ERROR: n must be a positive integer.\n");
        return;
    }
    cryptorand_t randstate;
    cryptorand_initseed(randstate, seed ? seed : "", NULL);
    fmpz_t modp;
    fmpz_init(modp);
    fmpz_set_str(modp, p_str, 10);
    fmpz_mat_t a;
    fmpz_mat_init(a, n, n);
    for(int i = 0; i < n; i++) {
        for(int j = 0; j < n; j++) {
            fmpz_randm_crypto(fmpz_mat_entry(a, i, j), randstate, modp);
        }
    }
    fmpz_t det;
    fmpz_init(det);
    fmpz_mat_t adjugate;
    fmpz_mat_init(adjugate, n, n);
    fmpz_mat_t prod;
    fmpz_mat_init(prod, n, n);
    fmpz_mat_t check;
    fmpz_mat_init(check, n, n);
    if(is_simulated_setup) {
        /* set det and adj randomly */
        fmpz_randm_crypto(det, randstate, modp);
        for(int i = 0; i < n; i++) {
            for(int j = 0; j < n; j++) {
                fmpz_randm_crypto(fmpz_mat_entry(adjugate, i, j), randstate, modp);
            }
        }
    } else {
        fmpz_modp_matrix_det(det, a, n, modp);
        if (fmpz_is_zero(det)) {
            fprintf(stderr, "ERROR: Random matrix was not invertible.\n");
            /* BUG FIX: this used to jump past the clears of a, prod and
             * check, leaking all three matrices. */
            goto exit;
        }
        fmpz_modp_matrix_adjugate(adjugate, a, n, modp);
        fmpz_mat_transpose(adjugate, adjugate);
        fmpz_mat_mul_modp(prod, a, adjugate, n, modp);
        /* check that the adjugate and determinant were computed correctly:
         * a * adj must equal det * I */
        fmpz_mat_one(check);
        fmpz_mat_scalar_mul_fmpz(check, check, det);
        int status = fmpz_mat_equal(prod, check);
        if (status == 0) {
            fprintf(stderr, "ERROR: Failed to produce the proper matrices.\n");
            goto exit;
        }
    }
    /* print the resulting values */
    fmpz_fprint(stdout, det);
    printf("\n");
    fmpz_mat_fprint(stdout, a);
    printf("\n");
    fmpz_mat_transpose(adjugate, adjugate);
    fmpz_mat_fprint(stdout, adjugate);
    printf("\n");
exit:
    /* single cleanup label: everything above is initialized by this point */
    fmpz_mat_clear(a);
    fmpz_mat_clear(prod);
    fmpz_mat_clear(check);
    fmpz_mat_clear(adjugate);
    fmpz_clear(det);
    fmpz_clear(modp);   /* BUG FIX: modp was previously never cleared */
    cryptorand_clear(randstate);
}
/*
 * det = det(a) mod p for an n x n matrix, via Gaussian elimination with
 * row swaps. Requires n >= 1. Uses fmpz_invmod on pivots, so p is assumed
 * to make every nonzero pivot invertible (i.e. p prime) -- TODO confirm
 * against callers.
 *
 * NOTE(review): for n == 1 the result is NOT reduced mod p here, unlike the
 * n >= 2 paths; callers appear to reduce afterwards -- verify.
 */
void fmpz_modp_matrix_det(fmpz_t det, fmpz_mat_t a, int n, fmpz_t p) {
    assert(n >= 1);
    // 1 x 1: determinant is the single entry.
    if(n == 1) {
        fmpz_set(det, fmpz_mat_entry(a, 0, 0));
        return;
    }
    // 2 x 2: ad - bc, each product reduced mod p.
    if (n == 2) {
        fmpz_t tmp1;
        fmpz_init(tmp1);
        fmpz_mul(tmp1, fmpz_mat_entry(a,0,0), fmpz_mat_entry(a,1,1));
        fmpz_mod(tmp1, tmp1, p);
        fmpz_t tmp2;
        fmpz_init(tmp2);
        fmpz_mul(tmp2, fmpz_mat_entry(a,1,0), fmpz_mat_entry(a,0,1));
        fmpz_mod(tmp2, tmp2, p);
        fmpz_sub(det, tmp1, tmp2);
        fmpz_mod(det, det, p);
        fmpz_clear(tmp1);
        fmpz_clear(tmp2);
        return;
    }
    // General case: eliminate on a working copy m, tracking row swaps.
    fmpz_mat_t m;
    fmpz_mat_init_set(m, a);
    fmpz_t tmp;
    fmpz_init(tmp);
    fmpz_t multfactor;
    fmpz_init(multfactor);
    int num_swaps = 0;
    for(int j = 0; j < n; j++) {
        for(int i = j+1; i < n; i++) {
            // Zero pivot: swap in a row below with a nonzero entry in col j.
            if(fmpz_is_zero(fmpz_mat_entry(m, j, j))) {
                // find first row that isn't a zero, and swap
                int h;
                for(h = j+1; h < n; h++) {
                    if(!fmpz_is_zero(fmpz_mat_entry(m, h, j))) {
                        // found the row
                        break;
                    }
                }
                if(h == n) {
                    // matrix is not invertible: whole column is zero
                    fmpz_set_ui(det, 0);
                    fmpz_clear(multfactor);
                    fmpz_clear(tmp);
                    fmpz_mat_clear(m);
                    return;
                }
                // swap row h with row j
                for(int k = 0; k < n; k++) {
                    fmpz_set(tmp, fmpz_mat_entry(m, h, k));
                    fmpz_set(fmpz_mat_entry(m, h, k), fmpz_mat_entry(m, j, k));
                    fmpz_set(fmpz_mat_entry(m, j, k), tmp);
                }
                num_swaps++;
            }
            // multfactor = m[i][j] / m[j][j] mod p, the elimination factor.
            fmpz_invmod(multfactor, fmpz_mat_entry(m, j, j), p);
            fmpz_mul(multfactor, multfactor, fmpz_mat_entry(m, i, j));
            fmpz_mod(multfactor, multfactor, p);
            // row i -= multfactor * row j (mod p); each column index k is
            // independent, so the iterations can run in parallel.
#pragma omp parallel for
            for(int k = j; k < n; k++) {
                fmpz_t tmp2;
                fmpz_init(tmp2);
                fmpz_mul(tmp2, fmpz_mat_entry(m, j, k), multfactor);
                fmpz_sub(fmpz_mat_entry(m, i, k), fmpz_mat_entry(m, i, k), tmp2);
                fmpz_mod(fmpz_mat_entry(m, i, k), fmpz_mat_entry(m, i, k), p);
                fmpz_clear(tmp2);
            }
        }
    }
    fmpz_clear(multfactor);
    fmpz_clear(tmp);
    // Determinant = product of the diagonal, sign-flipped once per swap.
    fmpz_set_ui(det, 1);
    for(int j = 0; j < n; j++) {
        fmpz_mul(det, det, fmpz_mat_entry(m, j, j));
    }
    if(num_swaps % 2 == 1) {
        fmpz_neg(det, det);
    }
    fmpz_mod(det, det, p);
    fmpz_mat_clear(m);
}
/*
 * b = adjugate of the n x n matrix a, mod p: b[i][j] is the (i,j) cofactor
 * (signed minor) of a. NOTE(review): with this index order b is the cofactor
 * matrix; the caller transposes it afterwards -- confirm intended orientation.
 * b and a must be distinct matrices.
 */
void fmpz_modp_matrix_adjugate(fmpz_mat_t b, fmpz_mat_t a, int n, fmpz_t p) {
    // adjugate of a 1 x 1 matrix is [1]
    if(n == 1) {
        fmpz_set_ui(fmpz_mat_entry(b, 0, 0), 1);
        return;
    }
    fmpz_t det;
    fmpz_init(det);
    // c holds the (n-1) x (n-1) minor being processed
    fmpz_mat_t c;
    fmpz_mat_init(c, n-1, n-1);
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            /* Form the adjoint a_ij: copy a into c, skipping row i and
             * column j (i1/j1 lag behind i_iter/j_iter by one past the
             * skipped index). */
            for (int i_iter = 0, i1 = 0; i_iter < n; i_iter++, i1++) {
                if (i_iter == i) {
                    i1--;
                    continue;
                }
                for (int j_iter = 0, j1 = 0; j_iter < n; j_iter++, j1++) {
                    if (j_iter == j) {
                        j1--;
                        continue;
                    }
                    fmpz_set(fmpz_mat_entry(c, i1, j1), fmpz_mat_entry(a, i_iter, j_iter));
                }
            }
            /* Calculate the determinant of the minor */
            fmpz_modp_matrix_det(det, c, n-1, p);
            /* Fill in the elements of the adjugate: sign is (-1)^(i+j) */
            if((i+j) % 2 == 1) {
                fmpz_negmod(det, det, p);
            }
            fmpz_mod(det, det, p);
            fmpz_set(fmpz_mat_entry(b, i, j), det);
        }
    }
    fmpz_clear(det);
    fmpz_mat_clear(c);
}
|
GB_binop__rminus_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int16)
// A*D function (colscale): GB (_AxD__rminus_int16)
// D*A function (rowscale): GB (_DxB__rminus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int16)
// C=scalar+B GB (_bind1st__rminus_int16)
// C=scalar+B' GB (_bind1st_tran__rminus_int16)
// C=A+scalar GB (_bind2nd__rminus_int16)
// C=A'+scalar GB (_bind2nd_tran__rminus_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
    int16_t
#define GB_BTYPE \
    int16_t
#define GB_CTYPE \
    int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (FIX: a stray trailing backslash after the `0` previously spliced the
// following comment line into this directive; harmless only because comment
// stripping happens after line splicing, but fragile if the next line ever
// stops being a comment)
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (FIX: same stray trailing backslash removed here)
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: rminus, z = y - x
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are treated as dense; the rminus
// operator (z = y - x) is applied by the included template.
void GB (_Cdense_ewise3_accum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are treated as dense, no accumulation;
// the rminus operator (z = y - x) is applied by the included template.
void GB (_Cdense_ewise3_noaccum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C with the rminus
// operator, via the included subassign template. B_ek_slicing describes the
// parallel partition of B's entries. Returns GrB_NO_VALUE when this operator
// is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C with the rminus operator, via the included template.
GrB_Info GB (_Cdense_accumb__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the return above fires first); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// rminus operator via the colscale template.
GrB_Info GB (_AxD__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;   // template writes through Cx
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// rminus operator via the rowscale template.
GrB_Info GB (_DxB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;   // template writes through Cx
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M) with the rminus operator.
// When is_eWiseUnion is true, alpha/beta scalars (type-erased inputs) supply
// the values used where one operand is missing; otherwise the eWiseAdd rule
// applies. Workspace is allocated/freed via the GB_WERK macros.
GrB_Info GB (_AaddB__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the type-erased alpha/beta scalars for eWiseUnion
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, C sparse/hypersparse;
// the rminus operator is applied by the included meta-template.
GrB_Info GB (_AemultB_08__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP selects at compile time whether a runtime
// flipxy flag must be honored; for rminus it is 0, so the flip was already
// resolved by the caller (rminus is minus with flipped operands).
GrB_Info GB (_AemultB_02__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; rminus applied by the included template.
GrB_Info GB (_AemultB_04__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C = A.*B with optional (complemented)
// mask; rminus applied by the included bitmap template.
GrB_Info GB (_AemultB_bitmap__rminus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand of rminus, so
// Cx [p] = Bx [p] - x for every entry present in B's bitmap.
GrB_Info GB (_bind1st__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the type-erased inputs
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // entries absent from the bitmap are skipped
        if (!GBB (Bb, pB)) continue ;
        int16_t bij = GBX (Bx, pB, false) ;
        Cx [pB] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand of rminus, so
// Cx [p] = y - Ax [p] for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the type-erased inputs
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // entries absent from the bitmap are skipped
        if (!GBB (Ab, pA)) continue ;
        int16_t aij = GBX (Ax, pA, false) ;
        Cx [pA] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply rminus with the scalar bound first
// (via the GB_CAST_OP macro defined just above); driven by the included
// transpose template.
GrB_Info GB (_bind1st_tran__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply rminus with the scalar bound second
// (via the GB_CAST_OP macro defined just above); driven by the included
// transpose template.
GrB_Info GB (_bind2nd_tran__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
md5.h | /*
Header untuk melakukan hashing menggunakan metode MD5.
Sumber: https://github.com/pod32g/MD5/blob/master/md5.c
Dilakukan sedikit modifikasi sehingga tidak memerlukan
header <stdint.h>
*/
#ifndef MD5
#define MD5
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "parallel_string.h"
// Constants are the integer part of the sines of integers (in radians) * 2^32.
// FIX: declared `static` — in C a file-scope `const` object has external
// linkage, so defining these in a header produces duplicate symbols when the
// header is included from more than one translation unit.
static const unsigned int k[64] = {
0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee ,
0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501 ,
0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be ,
0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821 ,
0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa ,
0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8 ,
0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed ,
0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a ,
0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c ,
0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70 ,
0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05 ,
0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665 ,
0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039 ,
0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1 ,
0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1 ,
0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391 };
// r specifies the per-round shift amounts
static const unsigned int r[] = {7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
                                 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
                                 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
                                 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21};
// 32-bit left rotate; c must be in 1..31 (c == 0 or 32 would shift by 32,
// which is undefined behavior — all values in r[] satisfy this).
#define LEFTROTATE(x, c) (((x) << (c)) | ((x) >> (32 - (c))))
/* Serialize a 32-bit value into 4 bytes, little-endian (least significant
   byte first). */
void to_bytes(unsigned int val, unsigned char *bytes)
{
    bytes[0] = (unsigned char) (val & 0xff);
    bytes[1] = (unsigned char) ((val >> 8) & 0xff);
    bytes[2] = (unsigned char) ((val >> 16) & 0xff);
    bytes[3] = (unsigned char) ((val >> 24) & 0xff);
}
/* Deserialize 4 little-endian bytes into a 32-bit value.
   (The name is historical/misleading: it converts FROM bytes TO an
   unsigned int; kept for compatibility with existing callers.) */
unsigned int to_unsignedchar(const unsigned char *bytes)
{
    unsigned int value = 0;
    for (int i = 3; i >= 0; i--) {
        value = (value << 8) | (unsigned int) bytes[i];
    }
    return value;
}
/**
 * Computes the MD5 digest (RFC 1321) of initial_len bytes at initial_msg
 * and writes the 16-byte result to digest (little-endian words h0..h3).
 * On allocation failure, digest is left untouched.
 */
void md5(const unsigned char *initial_msg, size_t initial_len, unsigned char *digest) {
    // These vars will contain the hash
    unsigned int h0, h1, h2, h3;
    // Message (to prepare)
    unsigned char *msg = NULL;
    size_t new_len, offset;
    unsigned int w[16];
    unsigned int a, b, c, d, i, f, g, temp;
    // Initialize variables - simple count in nibbles:
    h0 = 0x67452301;
    h1 = 0xefcdab89;
    h2 = 0x98badcfe;
    h3 = 0x10325476;
    // Pre-processing:
    //   append "1" bit to message
    //   append "0" bits until message length in bits == 448 (mod 512)
    //   append length mod (2^64) to message
    for (new_len = initial_len + 1; new_len % (64) != 56; new_len++)
        ;
    msg = (unsigned char*)malloc(new_len + 8);
    if (msg == NULL)
        return; // FIX: malloc was previously unchecked
    // BUG FIX: was my_strcpy, which stops at the first NUL byte and ignores
    // initial_len entirely — wrong digest for any binary input. memcpy
    // copies exactly initial_len bytes.
    memcpy(msg, initial_msg, initial_len);
    msg[initial_len] = 0x80; // append the "1" bit; most significant bit is "first"
    // FIX: zero padding via memset (the former "#pragma omp parallel for"
    // loop spawned threads to write a handful of zero bytes).
    memset(msg + initial_len + 1, 0, new_len - initial_len - 1);
    // append the len in bits at the end of the buffer.
    to_bytes(initial_len*8, msg + new_len);
    // initial_len>>29 == initial_len*8>>32, but avoids overflow.
    to_bytes(initial_len>>29, msg + new_len + 4);
    // Process the message in successive 512-bit chunks. The chunk loop is
    // inherently sequential (h0..h3 carry between chunks).
    // FIX: removed the orphaned "#pragma omp for" / "#pragma omp single"
    // worksharing directives here — outside a parallel region they were
    // no-ops, but had md5 ever been called from inside one, they would have
    // split iterations over per-call-local state and corrupted the digest.
    for(offset=0; offset<new_len; offset += 64) {
        // break chunk into sixteen 32-bit words w[j], 0 <= j <= 15
        for (i = 0; i < 16; i++){
            w[i] = to_unsignedchar(msg + offset + i*4);
        }
        // Initialize hash value for this chunk:
        a = h0;
        b = h1;
        c = h2;
        d = h3;
        // Main loop: four rounds of 16 operations each.
        for(i = 0; i<64; i++) {
            if (i < 16) {
                f = (b & c) | ((~b) & d);
                g = i;
            } else if (i < 32) {
                f = (d & b) | ((~d) & c);
                g = (5*i + 1) % 16;
            } else if (i < 48) {
                f = b ^ c ^ d;
                g = (3*i + 5) % 16;
            } else {
                f = c ^ (b | (~d));
                g = (7*i) % 16;
            }
            temp = d;
            d = c;
            c = b;
            b = b + LEFTROTATE((a + f + k[i] + w[g]), r[i]);
            a = temp;
        }
        // Add this chunk's hash to result so far:
        h0 += a;
        h1 += b;
        h2 += c;
        h3 += d;
    }
    // cleanup
    free(msg);
    // Output is in little-endian: digest = h0 || h1 || h2 || h3
    to_bytes(h0, digest);
    to_bytes(h1, digest + 4);
    to_bytes(h2, digest + 8);
    to_bytes(h3, digest + 12);
}
#endif //MD5 |
GB_binop__isle_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale): GB (_AxD__isle_int8)
// D*A function (rowscale): GB (_DxB__isle_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B GB (_bind1st__isle_int8)
// C=scalar+B' GB (_bind1st_tran__isle_int8)
// C=A+scalar GB (_bind2nd__isle_int8)
// C=A'+scalar GB (_bind2nd_tran__isle_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are treated as dense, no accumulation;
// the isle operator (z = (x <= y)) is applied by the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C with the isle
// operator, via the included subassign template. B_ek_slicing describes the
// parallel partition of B's entries.
GrB_Info GB (_Cdense_accumB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C with the isle operator, via the included template.
GrB_Info GB (_Cdense_accumb__isle_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the return above fires first); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// isle operator via the colscale template. The *_is_pattern flags indicate
// when only the structure (not the values) of an input is used.
GrB_Info GB (_AxD__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;   // template writes through Cx
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// isle operator via the rowscale template.
GrB_Info GB (_DxB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;   // template writes through Cx
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M) with the isle operator, driven
// by the included add template. Workspace is allocated/freed via the
// GB_WERK macros. (This file predates the eWiseUnion variant generated for
// other operators in this collection.)
GrB_Info GB (_AaddB__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, C sparse/hypersparse;
// the isle operator is applied by the included meta-template.
GrB_Info GB (_AemultB_08__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP selects at compile time whether the runtime
// flipxy flag must be honored; for isle it is 0, so the flip has already
// been resolved by the caller.
GrB_Info GB (_AemultB_02__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; isle applied by the included template.
GrB_Info GB (_AemultB_04__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult kernel for C held as bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B
// (the variant is presumably chosen from ewise_method inside the template).
// Auto-generated factory code for the int8 ISLE operator.
GrB_Info GB (_AemultB_bitmap__isle_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this kernel was compiled out; the caller falls back to a generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x <= bij) with the scalar x bound as the first operand:
// Cx [p] = (x <= Bx [p]) for every entry present in B (per the Bb bitmap).
GrB_Info GB (_bind1st__isle_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t  x  = (*((int8_t *) x_input)) ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // skip positions not present in the bitmap
        if (GBB (Bb, pB))
        {
            int8_t bij = GBX (Bx, pB, false) ;
            Cx [pB] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij <= y) with the scalar y bound as the second operand:
// Cx [p] = (Ax [p] <= y) for every entry present in A (per the Ab bitmap).
GrB_Info GB (_bind2nd__isle_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t  y  = (*((int8_t *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // skip positions not present in the bitmap
        if (GBB (Ab, pA))
        {
            int8_t aij = GBX (Ax, pA, false) ;
            Cx [pA] = (aij <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x <= aij) ;                      \
}

// C = op (x, A'): transpose A and apply z = (x <= aij) at each entry,
// via the GB_CAST_OP macro consumed by the included transpose template.
GrB_Info GB (_bind1st_tran__isle_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // generated code re-establishes GB_ATYPE (same definition) after use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij <= y) ;                      \
}

// C = op (A', y): transpose A and apply z = (aij <= y) at each entry,
// via the GB_CAST_OP macro consumed by the included transpose template.
GrB_Info GB (_bind2nd_tran__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pf_fold.c | /*
 * partition function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility
* wrappers
*
* Ivo L Hofacker + Ronny Lorenz
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int st_back = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
 * Mean base pair distance of the thermodynamic ensemble:
 *   <d> = \sum_{a,b} p_a p_b d(S_a, S_b) = \sum_{ij} p_ij (1 - p_ij)
 * where p is the pair probability matrix addressed through `index`
 * (row-wise iindx), and pairs closer than `turn` are excluded.
 */
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
                      int length,
                      int *index,
                      int turn)
{
  int    i, j;
  double sum = 0.;

  for (i = 1; i <= length; i++) {
    for (j = i + turn + 1; j <= length; j++) {
      FLT_OR_DBL pij = p[index[i] - j];
      sum += pij * (1 - pij);
    }
  }

  /* each unordered pair of structures is counted twice */
  return 2 * sum;
}
/*
 * Backward-compatibility core behind pf_fold()/pf_circ_fold()/pf_fold_par():
 * builds a vrna_fold_compound_t from the sequence and either the supplied
 * parameters or the global model settings, applies optional dot-bracket
 * constraints, installs the compound in the global backward-compat slot,
 * and finally runs the partition function via vrna_pf().
 */
PRIVATE float
wrap_pf_fold(const char *sequence,
             char *structure,
             vrna_exp_param_t *parameters,
             int calculate_bppm,
             int is_constrained,
             int is_circular)
{
  vrna_fold_compound_t *vc;
  vrna_md_t md;

  vc = NULL;

  /* we need vrna_exp_param_t datastructure to correctly init default hard constraints */
  if (parameters)
    md = parameters->model_details;
  else
    set_model_details(&md); /* get global default parameters */

  md.circ = is_circular;
  md.compute_bpp = calculate_bppm;

  vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);

  /* prepare exp_params and set global pf_scale */
  vc->exp_params = vrna_exp_params(&(vc->params->model_details));
  vc->exp_params->pf_scale = pf_scale;

  if (is_constrained && structure) {
    /* interpret `structure` as a dot-bracket constraint string */
    unsigned int constraint_options = 0;
    constraint_options |= VRNA_CONSTRAINT_DB
                          | VRNA_CONSTRAINT_DB_PIPE
                          | VRNA_CONSTRAINT_DB_DOT
                          | VRNA_CONSTRAINT_DB_X
                          | VRNA_CONSTRAINT_DB_ANG_BRACK
                          | VRNA_CONSTRAINT_DB_RND_BRACK;
    vrna_constraints_add(vc, (const char *)structure, constraint_options);
  }

  /* free any previously stored compound that we own, then take ownership */
  if (backward_compat_compound && backward_compat)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound = vc;
  backward_compat = 1;
  /* expose the row-wise index to legacy code that reads the global iindx */
  iindx = backward_compat_compound->iindx;

  return vrna_pf(vc, structure);
}
/*
 * Deprecated wrapper: return stacking probabilities above `cutoff` from the
 * globally stored fold compound.  Requires a prior pf_fold() call.
 */
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
  if (backward_compat_compound && backward_compat) {
    if (backward_compat_compound->exp_matrices->probs)
      return vrna_stack_prob(backward_compat_compound, cutoff);

    vrna_message_warning("stackProb: probs == NULL!");
    return NULL;
  }

  vrna_message_warning("stackProb: run pf_fold() first!");
  return NULL;
}
/*
 * Deprecated wrapper: compute the centroid structure from the global pair
 * probability matrix `pr` (filled by a prior pf_fold() call).
 */
PUBLIC char *
centroid(int length,
         double *dist)
{
  if (pr)
    return vrna_centroid_from_probs(length, dist, pr);

  vrna_message_warning("centroid: pr == NULL. You need to call pf_fold() before centroid()");
  return NULL;
}
PUBLIC double
mean_bp_dist(int length)
{
/* compute the mean base pair distance in the thermodynamic ensemble */
/* <d> = \sum_{a,b} p_a p_b d(S_a,S_b)
* this can be computed from the pair probs p_ij as
* <d> = \sum_{ij} p_{ij}(1-p_{ij}) */
int i, j, *my_iindx;
double d = 0;
if (pr == NULL) {
vrna_message_warning("mean_bp_dist: "
"pr == NULL. You need to call pf_fold() before mean_bp_dist()");
return d;
}
my_iindx = vrna_idx_row_wise(length);
for (i = 1; i <= length; i++)
for (j = i + TURN + 1; j <= length; j++)
d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]);
free(my_iindx);
return 2 * d;
}
/* get the free energy of a subsequence from the q[] array */
/*
 * Deprecated: free energy (kcal/mol) of the subsequence [i..j], derived
 * from the q[] partition function array of the global fold compound and
 * corrected for the per-nucleotide scaling factor pf_scale.
 */
PUBLIC double
get_subseq_F(int i,
             int j)
{
  if (backward_compat_compound &&
      backward_compat_compound->exp_matrices &&
      backward_compat_compound->exp_matrices->q) {
    int              *my_iindx  = backward_compat_compound->iindx;
    vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
    FLT_OR_DBL       *q         = backward_compat_compound->exp_matrices->q;
    /* undo the scaling applied to q, convert from cal/mol to kcal/mol */
    return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
           1000.0;
  }

  vrna_message_warning("get_subseq_F: call pf_fold() to fill q[] array before calling get_subseq_F()");
  return 0.;
}
/*----------------------------------------------------------------------*/
/*
 * Deprecated: Boltzmann weight of a hairpin loop of size u, closed by a
 * pair of type `type` with mismatch bases si1/sj1; the caller multiplies
 * by scale[u+2].  Tabulated tetra-/hexa-/tri-loop motifs (matched against
 * the leading characters of `string`) override the generic formula.
 */
PUBLIC double
expHairpinEnergy(int u,
                 int type,
                 short si1,
                 short sj1,
                 const char *string)
{
  /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
  vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
  double q, kT;

  kT = pf_params->kT; /* kT in cal/mol */

  /* generic size term: tabulated up to 30, logarithmic extrapolation beyond */
  if (u <= 30)
    q = pf_params->exphairpin[u];
  else
    q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);

  if ((tetra_loop) && (u == 4)) {
    /* tetraloop motif: 4 loop bases + closing pair = 6 chars, table stride 7 */
    char tl[7] = {
      0
    }, *ts;
    strncpy(tl, string, 6);
    if ((ts = strstr(pf_params->Tetraloops, tl)))
      return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
  }

  if ((tetra_loop) && (u == 6)) {
    /* hexaloop motif: 6 loop bases + closing pair = 8 chars, table stride 9.
     * BUGFIX: copy 8 characters (was 6); a 6-char prefix could match at the
     * wrong offset inside the Hexaloops table and return a wrong energy. */
    char tl[9] = {
      0
    }, *ts;
    strncpy(tl, string, 8);
    if ((ts = strstr(pf_params->Hexaloops, tl)))
      return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
  }

  if (u == 3) {
    /* triloop motif: 3 loop bases + closing pair = 5 chars, table stride 6 */
    char tl[6] = {
      0
    }, *ts;
    strncpy(tl, string, 5);
    if ((ts = strstr(pf_params->Triloops, tl)))
      return pf_params->exptri[(ts - pf_params->Triloops) / 6];

    /* no mismatches for tri-loops, only a terminal AU-type penalty */
    if (type > 2)
      q *= pf_params->expTermAU;
  } else {
    q *= pf_params->expmismatchH[type][si1][sj1];
  }

  return q;
}
/*
 * Deprecated: Boltzmann weight of an interior loop with u1/u2 unpaired
 * bases on the two strands, closing pair type `type`, enclosed pair type
 * `type2`, and mismatch bases si1, sj1, sp1, sq1.  The caller multiplies
 * by scale[u1+u2+2].  Returns 0 for loops forbidden by no_closingGU.
 */
PUBLIC double
expLoopEnergy(int u1,
              int u2,
              int type,
              int type2,
              short si1,
              short sj1,
              short sp1,
              short sq1)
{
  /* compute Boltzmann weight of interior loop,
   * multiply by scale[u1+u2+2] for scaling */
  double z = 0;
  int no_close = 0;
  vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;

  /* pair types 3 and 4 are the GU/UG wobble pairs.
   * BUGFIX: the closing-pair test read (type == 2); type 2 is a Watson-
   * Crick pair and must not be rejected -- the wobble types are 3 and 4. */
  if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 3) || (type == 4)))
    no_close = 1;

  if ((u1 == 0) && (u2 == 0)) {
    /* stack */
    z = pf_params->expstack[type][type2];
  } else if (no_close == 0) {
    if ((u1 == 0) || (u2 == 0)) {
      /* bulge */
      int u;
      u = (u1 == 0) ? u2 : u1;
      z = pf_params->expbulge[u];
      if (u2 + u1 == 1) {
        /* size-1 bulges keep the stacking contribution */
        z *= pf_params->expstack[type][type2];
      } else {
        /* terminal AU-type penalties on both closing pairs */
        if (type > 2)
          z *= pf_params->expTermAU;

        if (type2 > 2)
          z *= pf_params->expTermAU;
      }
    } else {
      /* interior loop */
      if (u1 + u2 == 2) {
        /* size 2 is special */
        z = pf_params->expint11[type][type2][si1][sj1];
      } else if ((u1 == 1) && (u2 == 2)) {
        z = pf_params->expint21[type][type2][si1][sq1][sj1];
      } else if ((u1 == 2) && (u2 == 1)) {
        /* mirrored 2x1 loop: swap the roles of the two pairs */
        z = pf_params->expint21[type2][type][sq1][si1][sp1];
      } else if ((u1 == 2) && (u2 == 2)) {
        z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
      } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) {
        /*2-3 is special*/
        z = pf_params->expinternal[5] *
            pf_params->expmismatch23I[type][si1][sj1] *
            pf_params->expmismatch23I[type2][sq1][sp1];
        z *= pf_params->expninio[2][1];
      } else if ((u1 == 1) || (u2 == 1)) {
        /*1-n is special*/
        z = pf_params->expinternal[u1 + u2] *
            pf_params->expmismatch1nI[type][si1][sj1] *
            pf_params->expmismatch1nI[type2][sq1][sp1];
        z *= pf_params->expninio[2][abs(u1 - u2)];
      } else {
        /* generic interior loop with asymmetry (ninio) penalty */
        z = pf_params->expinternal[u1 + u2] *
            pf_params->expmismatchI[type][si1][sj1] *
            pf_params->expmismatchI[type2][sq1][sp1];
        z *= pf_params->expninio[2][abs(u1 - u2)];
      }
    }
  }

  return z;
}
/* Deprecated no-op: memory is managed by the fold compound nowadays */
PUBLIC void
init_pf_circ_fold(int length)
{
  /* DO NOTHING */
}
/* Deprecated no-op: memory is managed by the fold compound nowadays */
PUBLIC void
init_pf_fold(int length)
{
  /* DO NOTHING */
}
/**
 *** Free all memory occupied by the backward compatibility
 *** partition function data structures
 **/
/* Release the globally stored fold compound (if this wrapper owns it) and
 * reset the backward-compatibility globals. */
PUBLIC void
free_pf_arrays(void)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound  = NULL;
  backward_compat           = 0;
  iindx                     = NULL;
}
/* Deprecated: expose the base pair probability matrix of the globally
 * stored fold compound, or NULL if none is available. */
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
  if (backward_compat_compound &&
      backward_compat_compound->exp_matrices &&
      backward_compat_compound->exp_matrices->probs)
    return backward_compat_compound->exp_matrices->probs;

  return NULL;
}
/*-------------------------------------------------------------------------*/
/* make arrays used for pf_fold available to other routines */
/* Deprecated: hand out pointers to the internal partition function arrays
 * of the globally stored fold compound.  Returns 1 on success, 0 if no
 * filled compound is available. */
PUBLIC int
get_pf_arrays(short **S_p,
              short **S1_p,
              char **ptype_p,
              FLT_OR_DBL **qb_p,
              FLT_OR_DBL **qm_p,
              FLT_OR_DBL **q1k_p,
              FLT_OR_DBL **qln_p)
{
  vrna_fold_compound_t *vc = backward_compat_compound;

  if (!(vc && vc->exp_matrices && vc->exp_matrices->qb))
    return 0;

  *S_p      = vc->sequence_encoding2;
  *S1_p     = vc->sequence_encoding;
  *ptype_p  = vc->ptype_pf_compat;
  *qb_p     = vc->exp_matrices->qb;
  *qm_p     = vc->exp_matrices->qm;
  *q1k_p    = vc->exp_matrices->q1k;
  *qln_p    = vc->exp_matrices->qln;
  return 1;
}
/*-----------------------------------------------------------------*/
/* Deprecated: linear-sequence partition function using global settings
 * (do_backtrack, fold_constrained); returns the ensemble free energy. */
PUBLIC float
pf_fold(const char *sequence,
        char *structure)
{
  return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
/* Deprecated: same as pf_fold() but for circular RNAs (is_circular = 1) */
PUBLIC float
pf_circ_fold(const char *sequence,
             char *structure)
{
  return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
/* Deprecated: partition function with explicit parameter set and flags,
 * forwarded verbatim to the backward-compatibility wrapper. */
PUBLIC float
pf_fold_par(const char *sequence,
            char *structure,
            vrna_exp_param_t *parameters,
            int calculate_bppm,
            int is_constrained,
            int is_circular)
{
  return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
/* Deprecated: stochastically backtrack one structure over the full length
 * of `seq` from the globally stored fold compound. */
PUBLIC char *
pbacktrack(char *seq)
{
  int n = (int)strlen(seq);

  return vrna_pbacktrack5(backward_compat_compound, n);
}
/* Deprecated: stochastically backtrack one structure for the first
 * `length` nucleotides. */
PUBLIC char *
pbacktrack5(char *seq,
            int length)
{
  /* the seq parameter must not differ from the one stored globally anyway, so we just ignore it */
  return vrna_pbacktrack5(backward_compat_compound, length);
}
/* Deprecated: stochastic backtracking for circular RNAs.  Returns NULL
 * unless a circular model with filled qm2 matrices is stored globally. */
PUBLIC char *
pbacktrack_circ(char *seq)
{
  if (backward_compat_compound) {
    vrna_md_t *md = &(backward_compat_compound->exp_params->model_details);
    if (md->circ && backward_compat_compound->exp_matrices->qm2)
      return vrna_pbacktrack(backward_compat_compound);
  }

  return NULL;
}
/* Deprecated: re-derive the exp_params of the globally stored compound
 * from the current global model settings. */
PUBLIC void
update_pf_params(int length)
{
  vrna_md_t md;

  if (!(backward_compat_compound && backward_compat))
    return;

  set_model_details(&md);
  vrna_exp_params_reset(backward_compat_compound, &md);

  /* compatibility with RNAup, may be removed sometime */
  pf_scale = backward_compat_compound->exp_params->pf_scale;
}
/* Deprecated: like update_pf_params(), but substitute the supplied
 * parameter set when one is given. */
PUBLIC void
update_pf_params_par(int length,
                     vrna_exp_param_t *parameters)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  if (parameters) {
    vrna_exp_params_subst(backward_compat_compound, parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    vrna_exp_params_reset(backward_compat_compound, &md);
  }

  /* compatibility with RNAup, may be removed sometime */
  pf_scale = backward_compat_compound->exp_params->pf_scale;
}
/* Deprecated: centroid structure of the globally stored compound;
 * `length` is unused, the compound knows its own size. */
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
                             double *dist)
{
  return vrna_centroid(backward_compat_compound, dist);
}
/* Deprecated: build a pair probability list (above cut_off) from the
 * globally stored compound; *pl is set to NULL when no probabilities
 * are available. */
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t **pl,
                           int length, /* ignored */
                           double cut_off)
{
  *pl = NULL;

  if (backward_compat_compound &&
      backward_compat_compound->exp_matrices->probs)
    *pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
/* Deprecated: ensemble mean base pair distance of the globally stored
 * compound; warns and returns 0 if no probabilities are available. */
PUBLIC double
mean_bp_distance(int length)
{
  if (backward_compat_compound &&
      backward_compat_compound->exp_matrices &&
      backward_compat_compound->exp_matrices->probs)
    return vrna_mean_bp_distance(backward_compat_compound);

  vrna_message_warning("mean_bp_distance: you need to call vrna_pf_fold first");
  return 0.;
}
/*
 * Deprecated: ensemble mean base pair distance computed from a caller
 * supplied probability matrix `p` (row-wise iindx layout, sequence
 * length `length`).  Warns and returns 0 when p is NULL.
 */
PUBLIC double
mean_bp_distance_pr(int length,
                    FLT_OR_DBL *p)
{
  double d = 0;
  int *index;

  if (p == NULL) {
    vrna_message_warning("mean_bp_distance_pr: "
                         "p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
    return d;
  }

  /* BUGFIX: allocate the index only after validating the input; the old
   * code allocated it first and leaked it on the early return above */
  index = vrna_idx_row_wise((unsigned int)length);

  d = wrap_mean_bp_distance(p, length, index, TURN);

  free(index);
  return d;
}
#endif
|
CPhotoconsistencyOdometry.h | /*
* Photoconsistency-Visual-Odometry
* Multiscale Photoconsistency Visual Odometry from RGBD Images
* Copyright (c) 2012, Miguel Algaba Borrego
*
* http://code.google.com/p/photoconsistency-visual-odometry/
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the holder(s) nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _CPHOTOCONSISTENCY_ODOMETRY_
#define _CPHOTOCONSISTENCY_ODOMETRY_
#define ENABLE_OPENMP_MULTITHREADING_WARP_IMAGE 0
#include "opencv2/imgproc/imgproc.hpp"
#include <eigen3/Eigen/Dense>
namespace PhotoconsistencyOdometry
{
/*!Builds the homogeneous 4x4 rigid-body pose from a translation (x, y, z)
 * and yaw/pitch/roll Euler angles (the entries match the standard Z-Y-X
 * rotation composition Rz(yaw)*Ry(pitch)*Rx(roll)).
 * BUGFIX: marked inline -- this free function is defined in a header, so
 * without inline every translation unit including it would emit a duplicate
 * definition (ODR / multiple-definition link error).*/
inline void eigenPose(float x,
               float y,
               float z,
               float yaw,
               float pitch,
               float roll,
               Eigen::Matrix4f & pose)
{
    // evaluate each trigonometric term exactly once
    const float cy = cos(yaw),   sy = sin(yaw);
    const float cp = cos(pitch), sp = sin(pitch);
    const float cr = cos(roll),  sr = sin(roll);

    pose(0,0) = cy * cp;
    pose(0,1) = cy * sp * sr - sy * cr;
    pose(0,2) = cy * sp * cr + sy * sr;
    pose(0,3) = x;
    pose(1,0) = sy * cp;
    pose(1,1) = sy * sp * sr + cy * cr;
    pose(1,2) = sy * sp * cr - cy * sr;
    pose(1,3) = y;
    pose(2,0) = -sp;
    pose(2,1) = cp * sr;
    pose(2,2) = cp * cr;
    pose(2,3) = z;
    pose(3,0) = 0;
    pose(3,1) = 0;
    pose(3,2) = 0;
    pose(3,3) = 1;
}
/*!Warps the intensity image imgGray into imgGrayWarped: each pixel with a
 * valid (positive) depth in imgDepth is back-projected with the pinhole
 * intrinsics (scaled by 2^level for pyramid level `level`), transformed by
 * the rigid motion Rt, and re-projected.  Pixels without valid depth or
 * projecting outside the image are left at zero in the output.*/
template <class T>
void warpImage(cv::Mat & imgGray,
               cv::Mat & imgDepth,
               cv::Mat & imgGrayWarped,
               Eigen::Matrix4f & Rt,
               Eigen::Matrix3f & cameraMatrix,int level=0)
{
    //Intrinsics at this pyramid level (halved per level)
    float fx = cameraMatrix(0,0)/pow(2,level);
    float fy = cameraMatrix(1,1)/pow(2,level);
    float inv_fx = 1.f/fx;
    float inv_fy = 1.f/fy;
    float ox = cameraMatrix(0,2)/pow(2,level);
    float oy = cameraMatrix(1,2)/pow(2,level);

    Eigen::Vector4f point3D;
    Eigen::Vector4f transformedPoint3D;
    int transformed_r,transformed_c; // 2D coordinates of the transformed pixel(r,c) of frame 1

    imgGrayWarped = cv::Mat::zeros(imgGray.rows,imgGray.cols,imgGray.type());

    #if ENABLE_OPENMP_MULTITHREADING_WARP_IMAGE
    #pragma omp parallel for private(point3D,transformedPoint3D,transformed_r,transformed_c)
    #endif
    for(int r=0;r<imgGray.rows;r++)
    {
        for(int c=0;c<imgGray.cols;c++)
        {
            if(imgDepth.at<float>(r,c)>0) //If has valid depth value
            {
                //Compute the local 3D coordinates of pixel(r,c) of frame 1
                point3D(2) = imgDepth.at<float>(r,c); //z
                point3D(0) = (c-ox) * point3D(2) * inv_fx; //x
                point3D(1) = (r-oy) * point3D(2) * inv_fy; //y
                point3D(3) = 1.0;  //homogeneous coordinate

                //Transform the 3D point using the transformation matrix Rt
                transformedPoint3D = Rt * point3D;
                // NOTE(review): assumes transformedPoint3D(2) != 0 after the
                // transform (division below) -- confirm callers guarantee it.

                //Project the 3D point to the 2D plane (coordinates truncate)
                transformed_c = ((transformedPoint3D(0) * fx) / transformedPoint3D(2)) + ox; //transformed x (2D)
                transformed_r = ((transformedPoint3D(1) * fy) / transformedPoint3D(2)) + oy; //transformed y (2D)

                //Assign the intensity value to the warped image if the
                //projection falls inside the image bounds.
                //BUGFIX: combine the bound checks with logical && instead of
                //bitwise & (same result on bools, but & was clearly a typo
                //and defeats short-circuit evaluation).
                if(transformed_r>=0 && transformed_r < imgGray.rows &&
                   transformed_c>=0 && transformed_c < imgGray.cols)
                {
                    imgGrayWarped.at<T>(transformed_r,transformed_c)=imgGray.at<T>(r,c);
                }
            }
        }
    }
}
/*!This abstract class defines the mandatory methods that any derived class must implement to compute the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach.*/
class CPhotoconsistencyOdometry
{
public:
    /*!Virtual destructor: implementations are used polymorphically, and
     * deleting a derived object through a base pointer without a virtual
     * destructor is undefined behavior.*/
    virtual ~CPhotoconsistencyOdometry() {}
    /*!Sets the 3x3 matrix of (pinhole) camera intrinsic parameters used to obtain the 3D colored point cloud from the RGB and depth images.*/
    virtual void setCameraMatrix(Eigen::Matrix3f & camMat)=0;
    /*!Sets the source (Intensity+Depth) frame.*/
    virtual void setSourceFrame(cv::Mat & imgGray,cv::Mat & imgDepth)=0;
    /*!Sets the target (Intensity+Depth) frame.*/
    virtual void setTargetFrame(cv::Mat & imgGray,cv::Mat & imgDepth)=0;
    /*!Initializes the state vector to a certain value. The optimization process uses the initial state vector as the initial estimate.*/
    virtual void setInitialStateVector(const std::vector<double> & initialStateVector)=0;
    /*!Launches the least-squares optimization process to find the configuration of the state vector parameters that maximizes the photoconsistency between the source and target frame.*/
    virtual void optimize()=0;
    /*!Returns the optimal state vector. This method has to be called after calling the optimize() method.*/
    virtual void getOptimalStateVector(std::vector<double> & optimalStateVector)=0;
    /*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame. This method has to be called after calling the optimize() method.*/
    virtual void getOptimalRigidTransformationMatrix(Eigen::Matrix4f & optimal_Rt)=0;
};
} //end namespace PhotoconsistencyOdometry
#endif
|
dct2_fft2.h | /**
* @file dct2_fft2.h
* @author Zixuan Jiang, Jiaqi Gu
* @date Aug 2019
* @brief All the transforms in this file are implemented based on 2D FFT.
* Each transfrom has three steps, 1) preprocess, 2) 2d fft or 2d ifft, 3) postprocess.
*/
#ifndef DREAMPLACE_DCT2_FFT2_H
#define DREAMPLACE_DCT2_FFT2_H
#include <math.h>
#include <float.h>
#include "utility/src/torch.h"
#include "utility/src/Msg.h"
#include "utility/src/ComplexNumber.h"
DREAMPLACE_BEGIN_NAMESPACE
// Input tensor sanity checks.
// BUGFIX: added the missing space after #x in every message (the old
// strings rendered as e.g. "xmust be ..."), and corrected CHECK_FLAT's
// message -- it asserts !x.is_cuda(), i.e. the tensor must be on the CPU,
// not the GPU.
#define CHECK_CPU(x) AT_ASSERTM(!x.is_cuda(), #x " must be a tensor on CPU")
#define CHECK_FLAT(x) AT_ASSERTM(!x.is_cuda() && x.ndimension() == 1, #x " must be a flat tensor on CPU")
#define CHECK_EVEN(x) AT_ASSERTM((x.numel()&1) == 0, #x " must have even number of elements")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
void dct2_fft2_forward(
at::Tensor x,
at::Tensor expkM,
at::Tensor expkN,
at::Tensor out,
at::Tensor buf,
int num_threads);
void idct2_fft2_forward(
at::Tensor x,
at::Tensor expkM,
at::Tensor expkN,
at::Tensor out,
at::Tensor buf,
int num_threads);
void idct_idxst_forward(
at::Tensor x,
at::Tensor expkM,
at::Tensor expkN,
at::Tensor out,
at::Tensor buf,
int num_threads);
void idxst_idct_forward(
at::Tensor x,
at::Tensor expkM,
at::Tensor expkN,
at::Tensor out,
at::Tensor buf,
int num_threads);
// Flatten a 2D (row, column) coordinate into a row-major linear offset for
// a matrix whose rows hold N elements.
inline int INDEX(const int hid, const int wid, const int N)
{
    return wid + hid * N;
}
// Preprocess step of dct2_fft2_forward: permute the M x N input x into the
// buffer y in the element order consumed by the subsequent 2D FFT (the
// postprocess step then recovers the DCT-II coefficients).  Elements are
// routed by the parity of their row/column indices.
template <typename T>
void dct2dPreprocessCpu(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    int halfN = N / 2;
#pragma omp parallel for num_threads(num_threads)
    for(int hid = 0; hid < M; ++hid)
    {
        for(int wid = 0; wid < N; ++wid)
        {
            int index;
            // bit 1: row index even, bit 0: column index even
            int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
            switch (cond)
            {
                case 0: // odd row, odd column
                    index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
                    break;
                case 1: // odd row, even column
                    index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
                    break;
                case 2: // even row, odd column
                    index = INDEX(hid, N - (wid + 1) / 2, halfN);
                    break;
                case 3: // even row, even column
                    index = INDEX(hid, wid / 2, halfN);
                    break;
                default:
                    break;
            }
            y[index] = x[INDEX(hid, wid, N)];
        }
    }
}
// Thin launcher for the DCT2 preprocess kernel (kept for symmetry with the
// other *Launcher entry points in this header).
template <typename T>
void dct2dPreprocessCpuLauncher(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    dct2dPreprocessCpu<T>(x, y, M, N, num_threads);
}
// Postprocess step of dct2_fft2_forward: combine the FFT half-spectrum V
// (M x (N/2+1) complex entries) with the twiddle factors expkM/expkN to
// produce the real M x N DCT-II coefficients y.  The loop covers only a
// quarter of the output; each iteration writes four (or the special
// boundary set of) outputs using the conjugate symmetry of the spectrum.
// `cond` encodes (hid != 0, wid != 0); hid == 0 / wid == 0 also take care
// of the halfM / halfN rows and columns.
template <typename T, typename TComplex>
void dct2dPostprocessCpu(
    const TComplex* V,
    T* y,
    const int M,
    const int N,
    const TComplex* expkM,
    const TComplex* expkN,
    int num_threads)
{
    int halfM = M / 2;
    int halfN = N / 2;
    // normalization constants for the four-way / two-way combined entries
    T four_over_MN = (T)(4. / (M * N));
    T two_over_MN = (T)(2. / (M * N));
#pragma omp parallel for num_threads(num_threads)
    for (int hid = 0; hid < halfM; ++hid)
    {
        for (int wid = 0; wid < halfN; ++wid)
        {
            int cond = ((hid != 0) << 1) | (wid != 0);
            switch (cond)
            {
                case 0: // hid == 0, wid == 0: DC entry plus the halfM/halfN lines
                {
                    y[0] = V[0].x * four_over_MN;
                    y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN;
                    y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN;
                    y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN;
                    break;
                }
                case 1: // hid == 0, wid != 0: first and halfM rows, column pair (wid, N-wid)
                {
                    ComplexType<T> tmp;
                    tmp = V[wid];
                    y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN;
                    y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
                    tmp = V[INDEX(halfM, wid, halfN + 1)];
                    y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN;
                    y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
                    break;
                }
                case 2: // hid != 0, wid == 0: first and halfN columns, row pair (hid, M-hid)
                {
                    ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
                    tmp1 = V[INDEX(hid, 0, halfN + 1)];
                    tmp2 = V[INDEX(M - hid, 0, halfN + 1)];
                    tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y);
                    tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y);
                    y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN;
                    y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN;
                    tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
                    tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
                    tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
                    tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
                    tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
                    tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
                    y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN;
                    y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN;
                    break;
                }
                case 3: // interior: four outputs from the symmetric spectrum pair
                {
                    ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
                    tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
                    tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
                    tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
                    tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
                    tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
                    tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
                    y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN;
                    y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN;
                    y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN;
                    y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN;
                    break;
                }
                default:
                    assert(0);
                    break;
            }
        }
    }
}
// Launcher for the DCT2 postprocess: the raw T buffers x/expkM/expkN are
// reinterpreted as ComplexType<T> arrays (interleaved .x/.y components)
// before dispatching to the typed kernel.
template <typename T>
void dct2dPostprocessCpuLauncher(
    const T* x,
    T* y,
    const int M,
    const int N,
    const T* expkM,
    const T* expkN,
    int num_threads)
{
    dct2dPostprocessCpu<T, ComplexType<T>>((ComplexType<T> *)x, y, M, N, (ComplexType<T> *)expkM, (ComplexType<T> *)expkN, num_threads);
}
// Preprocess step of idct2_fft2_forward: fold the real M x N DCT
// coefficients into an M x (N/2+1) complex buffer, twiddled by
// expkM/expkN and conjugated, in the form consumed by the subsequent 2D
// IFFT.  The loop covers a quarter of the domain; each iteration fills
// the symmetric counterpart entries as well.  `cond` encodes
// (hid != 0, wid != 0); the hid == 0 / wid == 0 cases additionally
// handle the halfM row and halfN column.
template <typename T, typename TComplex>
void idct2_fft2PreprocessCpu(
    const T* input,
    TComplex* output,
    const int M,
    const int N,
    const TComplex* expkM,
    const TComplex* expkN,
    int num_threads)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
#pragma omp parallel for num_threads(num_threads)
    for (int hid = 0; hid < halfM; ++hid)
    {
        for (int wid = 0; wid < halfN; ++wid)
        {
            int cond = ((hid != 0) << 1) | (wid != 0);
            switch (cond)
            {
                case 0: // hid == 0, wid == 0: DC entry plus the halfM/halfN lines
                {
                    T tmp1;
                    TComplex tmp_up;
                    output[0].x = input[0];
                    output[0].y = 0;
                    tmp1 = input[halfN];
                    tmp_up.x = tmp1;
                    tmp_up.y = tmp1;
                    output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
                    tmp1 = input[INDEX(halfM, 0, N)];
                    tmp_up.x = tmp1;
                    tmp_up.y = tmp1;
                    output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
                    tmp1 = input[INDEX(halfM, halfN, N)];
                    tmp_up.x = 0;
                    tmp_up.y = 2 * tmp1;
                    output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
                    break;
                }
                case 1: // hid == 0, wid != 0: first and halfM rows, column pair (wid, N-wid)
                {
                    TComplex tmp_up;
                    tmp_up.x = input[wid];
                    tmp_up.y = input[N - wid];
                    output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
                    T tmp1 = input[INDEX(halfM, wid, N)];
                    T tmp2 = input[INDEX(halfM, N - wid, N)];
                    tmp_up.x = tmp1 - tmp2;
                    tmp_up.y = tmp1 + tmp2;
                    output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
                    break;
                }
                case 2: // hid != 0, wid == 0: first and halfN columns, row pair (hid, M-hid)
                {
                    T tmp1, tmp3;
                    TComplex tmp_up, tmp_down;
                    tmp1 = input[INDEX(hid, 0, N)];
                    tmp3 = input[INDEX(M - hid, 0, N)];
                    tmp_up.x = tmp1;
                    tmp_up.y = tmp3;
                    tmp_down.x = tmp3;
                    tmp_down.y = tmp1;
                    output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
                    output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
                    tmp1 = input[INDEX(hid, halfN, N)];
                    tmp3 = input[INDEX(M - hid, halfN, N)];
                    tmp_up.x = tmp1 - tmp3;
                    tmp_up.y = tmp3 + tmp1;
                    tmp_down.x = tmp3 - tmp1;
                    tmp_down.y = tmp1 + tmp3;
                    output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
                    output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
                    break;
                }
                case 3: // interior: combine the four symmetric input entries
                {
                    T tmp1 = input[INDEX(hid, wid, N)];
                    T tmp2 = input[INDEX(hid, N - wid, N)];
                    T tmp3 = input[INDEX(M - hid, wid, N)];
                    T tmp4 = input[INDEX(M - hid, N - wid, N)];
                    TComplex tmp_up, tmp_down;
                    tmp_up.x = tmp1 - tmp4;
                    tmp_up.y = tmp3 + tmp2;
                    tmp_down.x = tmp3 - tmp2;
                    tmp_down.y = tmp1 + tmp4;
                    output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
                    output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
                    break;
                }
                default:
                    assert(0);
                    break;
            }
        }
    }
}
// Launcher for the IDCT2 preprocess: the raw T buffers y/expkM/expkN are
// reinterpreted as ComplexType<T> arrays (interleaved .x/.y components)
// before dispatching to the typed kernel.
template <typename T>
void idct2_fft2PreprocessCpuLauncher(
    const T* x,
    T* y,
    const int M,
    const int N,
    const T* expkM,
    const T* expkN,
    int num_threads)
{
    idct2_fft2PreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads);
}
// Postprocess step of idct2_fft2_forward: de-interleave the IFFT result x
// back into natural order and rescale by M*N.  First-half rows/columns
// map to even output rows/columns (2*hid), second-half ones map to odd
// output rows/columns (2*(M - hid) - 1, i.e. output row 2i+1 comes from
// input row M-1-i); same scheme for columns.
template <typename T>
void idct2_fft2PostprocessCpu(
    const T *x,
    T *y,
    const int M,
    const int N,
    int num_threads)
{
    int MN = M * N;
#pragma omp parallel for num_threads(num_threads)
    for (int hid = 0; hid < M; ++hid)
    {
        for (int wid = 0; wid < N; ++wid)
        {
            // bit 1: hid in the first half, bit 0: wid in the first half
            int cond = ((hid < M / 2) << 1) | (wid < N / 2);
            int index;
            switch (cond)
            {
                case 0: // second-half row and column -> odd row, odd column
                    index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
                    break;
                case 1: // second-half row, first-half column -> odd row, even column
                    index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
                    break;
                case 2: // first-half row, second-half column -> even row, odd column
                    index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
                    break;
                case 3: // first-half row and column -> even row, even column
                    index = INDEX(hid << 1, wid << 1, N);
                    break;
                default:
                    assert(0);
                    break;
            }
            // undo the implicit 1/(M*N) normalization of the inverse FFT
            y[index] = x[INDEX(hid, wid, N)] * MN;
        }
    }
}
// Thin launcher for the IDCT2 postprocess kernel (kept for symmetry with
// the other *Launcher entry points in this header).
template <typename T>
void idct2_fft2PostprocessCpuLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    int num_threads)
{
    idct2_fft2PostprocessCpu<T>(x, y, M, N, num_threads);
}
/// CPU preprocessing for a combined IDCT / IDXST (inverse discrete sine
/// transform) pair evaluated via a single 2D real FFT.  For each (hid, wid)
/// in the first quadrant, up to four symmetric entries of the M x (halfN+1)
/// complex `output` are filled by combining mirrored samples of `input`
/// with the twiddle factors expkM/expkN.
///
/// NOTE(review): compared with the plain IDCT2 preprocess above, the
/// mirrored column samples (wid vs N-wid) swap roles here -- presumably
/// producing the sine transform along the column dimension.  Confirm the
/// exact convention against the matching GPU kernel / consuming FFT.
///
/// @param input       real input of size M*N
/// @param output      complex output of size M*(N/2+1)
/// @param M           number of rows (assumed even; halved below)
/// @param N           number of columns (assumed even; halved below)
/// @param expkM       row twiddle factors (complex)
/// @param expkN       column twiddle factors (complex)
/// @param num_threads OpenMP thread count
template <typename T, typename TComplex>
void idct_idxstPreprocessCpu(
const T* input,
TComplex* output,
const int M,
const int N,
const TComplex* expkM,
const TComplex* expkN,
int num_threads)
{
int halfM = M / 2;
int halfN = N / 2;
#pragma omp parallel for num_threads(num_threads)
for (int hid = 0; hid < halfM; ++hid)
{
for (int wid = 0; wid < halfN; ++wid)
{
// cond encodes which of hid/wid lie on the symmetry axis (row 0 / col 0):
// 0 = both on axis, 1 = only hid==0, 2 = only wid==0, 3 = interior.
int cond = ((hid != 0) << 1) | (wid != 0);
switch (cond)
{
case 0:
{
// hid == 0 && wid == 0: handle the four special axis entries
// (corner, middle column halfN, middle row halfM, and center).
T tmp1;
TComplex tmp_up;
output[0].x = 0;
output[0].y = 0;
tmp1 = input[halfN];
tmp_up.x = tmp1;
tmp_up.y = tmp1;
output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
output[INDEX(halfM, 0, halfN + 1)].x = 0;
output[INDEX(halfM, 0, halfN + 1)].y = 0;
tmp1 = input[INDEX(halfM, halfN, N)];
tmp_up.x = 0;
tmp_up.y = 2 * tmp1;
output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
break;
}
case 1:
{
// hid == 0: fill row 0 and the middle row (halfM) at column wid,
// pairing each sample with its column mirror (N - wid).
TComplex tmp_up;
tmp_up.x = input[N - wid];
tmp_up.y = input[wid];
output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
T tmp1 = input[INDEX(halfM, N - wid, N)];
T tmp2 = input[INDEX(halfM, wid, N)];
tmp_up.x = tmp1 - tmp2;
tmp_up.y = tmp1 + tmp2;
output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
break;
}
case 2:
{
// wid == 0: column 0 entries are zeroed; the middle column (halfN)
// combines the row-mirrored pair (hid, M - hid).
T tmp1, tmp3;
TComplex tmp_up, tmp_down;
output[INDEX(hid, 0, halfN + 1)].x = 0;
output[INDEX(hid, 0, halfN + 1)].y = 0;
output[INDEX(M - hid, 0, halfN + 1)].x = 0;
output[INDEX(M - hid, 0, halfN + 1)].y = 0;
tmp1 = input[INDEX(hid, halfN, N)];
tmp3 = input[INDEX(M - hid, halfN, N)];
tmp_up.x = tmp1 - tmp3;
tmp_up.y = tmp3 + tmp1;
tmp_down.x = tmp3 - tmp1;
tmp_down.y = tmp1 + tmp3;
output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
break;
}
case 3:
{
// Interior point: combine the four mirrored samples
// (hid / M - hid) x (wid / N - wid) into two conjugate outputs.
T tmp1 = input[INDEX(hid, N - wid, N)];
T tmp2 = input[INDEX(hid, wid, N)];
T tmp3 = input[INDEX(M - hid, N - wid, N)];
T tmp4 = input[INDEX(M - hid, wid, N)];
TComplex tmp_up, tmp_down;
tmp_up.x = tmp1 - tmp4;
tmp_up.y = tmp3 + tmp2;
tmp_down.x = tmp3 - tmp2;
tmp_down.y = tmp1 + tmp4;
output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
break;
}
default:
// cond is always in [0, 3]; unreachable.
assert(0);
break;
}
}
}
}
template <typename T>
void idct_idxstPreprocessCpuLauncher(
const T* x,
T* y,
const int M,
const int N,
const T* expkM,
const T* expkN,
int num_threads)
{
idct_idxstPreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads);
}
/// Postprocess for the combined IDCT/IDXST transform on the CPU: undo the
/// even/odd index permutation, rescale by M*N, and apply the sign pattern of
/// the inverse sine transform along the column dimension.
///
/// Each (row, col) writes one distinct output slot, so the parallel loop is
/// race-free.
template <typename T>
void idct_idxstPostprocessCpu(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    const int scale = M * N;
#pragma omp parallel for num_threads(num_threads)
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            // Same even/odd mapping as the IDCT2 postprocess.
            const int r = (row < M / 2) ? (row << 1) : (((M - row) << 1) - 1);
            const int c = (col < N / 2) ? (col << 1) : (((N - col) << 1) - 1);
            const T v = x[INDEX(row, col, N)] * scale;
            // Columns drawn from the mirrored (right) half flip sign,
            // matching the original per-quadrant switch (cases 0 and 2).
            y[INDEX(r, c, N)] = (col < N / 2) ? v : -v;
        }
    }
}
/// Launcher for the combined IDCT/IDXST CPU postprocess; all buffers are
/// real, so it forwards to the kernel unchanged.
template <typename T>
void idct_idxstPostprocessCpuLauncher(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    idct_idxstPostprocessCpu<T>(x, y, M, N, num_threads);
}
/// CPU preprocessing for a combined IDXST / IDCT pair (sine transform along
/// the row dimension, cosine transform along the column dimension) evaluated
/// via a single 2D real FFT.  For each (hid, wid) in the first quadrant, up
/// to four symmetric entries of the M x (halfN+1) complex `output` are filled
/// from mirrored samples of `input` and the twiddle factors expkM/expkN.
///
/// NOTE(review): relative to idct_idxstPreprocessCpu above, here it is the
/// mirrored ROW samples (hid vs M - hid) whose roles are swapped -- presumably
/// placing the sine transform along the row dimension.  Confirm against the
/// matching GPU kernel / consuming FFT.
///
/// @param input       real input of size M*N
/// @param output      complex output of size M*(N/2+1)
/// @param M           number of rows (assumed even)
/// @param N           number of columns (assumed even)
/// @param expkM       row twiddle factors (complex)
/// @param expkN       column twiddle factors (complex)
/// @param num_threads OpenMP thread count
template <typename T, typename TComplex>
void idxst_idctPreprocessCpu(
const T* input,
TComplex* output,
const int M,
const int N,
const TComplex* expkM,
const TComplex* expkN,
int num_threads)
{
const int halfM = M / 2;
const int halfN = N / 2;
#pragma omp parallel for num_threads(num_threads)
for (int hid = 0; hid < halfM; ++hid)
{
for (int wid = 0; wid < halfN; ++wid)
{
// cond encodes which of hid/wid lie on the symmetry axis (row 0 / col 0):
// 0 = both on axis, 1 = only hid==0, 2 = only wid==0, 3 = interior.
int cond = ((hid != 0) << 1) | (wid != 0);
switch (cond)
{
case 0:
{
// hid == 0 && wid == 0: the corner and middle-column entries of row 0
// are zeroed; the middle row (halfM) gets its two special entries.
T tmp1;
TComplex tmp_up;
output[0].x = 0;
output[0].y = 0;
output[halfN].x = 0;
output[halfN].y = 0;
tmp1 = input[INDEX(halfM, 0, N)];
tmp_up.x = tmp1;
tmp_up.y = tmp1;
output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
tmp1 = input[INDEX(halfM, halfN, N)];
tmp_up.x = 0;
tmp_up.y = 2 * tmp1;
output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
break;
}
case 1:
{
// hid == 0: row-0 entry is zeroed; the middle row (halfM) combines the
// column-mirrored pair (wid, N - wid).
output[wid].x = 0;
output[wid].y = 0;
TComplex tmp_up;
T tmp1 = input[INDEX(halfM, wid, N)];
T tmp2 = input[INDEX(halfM, N - wid, N)];
tmp_up.x = tmp1 - tmp2;
tmp_up.y = tmp1 + tmp2;
output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
break;
}
case 2:
{
// wid == 0: column 0 combines the row-mirrored pair (hid, M - hid);
// note tmp1 is the MIRRORED row sample here (swapped vs idct_idxst).
T tmp1, tmp3;
TComplex tmp_up, tmp_down;
tmp1 = input[INDEX(M - hid, 0, N)];
tmp3 = input[INDEX(hid, 0, N)];
tmp_up.x = tmp1;
tmp_up.y = tmp3;
tmp_down.x = tmp3;
tmp_down.y = tmp1;
output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
tmp1 = input[INDEX(M - hid, halfN, N)];
tmp3 = input[INDEX(hid, halfN, N)];
tmp_up.x = tmp1 - tmp3;
tmp_up.y = tmp3 + tmp1;
tmp_down.x = tmp3 - tmp1;
tmp_down.y = tmp1 + tmp3;
output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
break;
}
case 3:
{
// Interior point: combine the four mirrored samples
// (M - hid / hid) x (wid / N - wid) into two conjugate outputs.
T tmp1 = input[INDEX(M - hid, wid, N)];
T tmp2 = input[INDEX(M - hid, N - wid, N)];
T tmp3 = input[INDEX(hid, wid, N)];
T tmp4 = input[INDEX(hid, N - wid, N)];
TComplex tmp_up, tmp_down;
tmp_up.x = tmp1 - tmp4;
tmp_up.y = tmp3 + tmp2;
tmp_down.x = tmp3 - tmp2;
tmp_down.y = tmp1 + tmp4;
output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
break;
}
default:
// cond is always in [0, 3]; unreachable.
assert(0);
break;
}
}
}
}
template <typename T>
void idxst_idctPreprocessCpuLauncher(
const T* x,
T* y,
const int M,
const int N,
const T* expkM,
const T* expkN,
int num_threads)
{
idxst_idctPreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads);
}
/// Postprocess for the combined IDXST/IDCT transform on the CPU: undo the
/// even/odd index permutation, rescale by M*N, and apply the sign pattern of
/// the inverse sine transform along the row dimension.
///
/// Each (row, col) writes one distinct output slot, so the parallel loop is
/// race-free.
template <typename T>
void idxst_idctPostprocessCpu(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    const int scale = M * N;
#pragma omp parallel for num_threads(num_threads)
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            // Same even/odd mapping as the IDCT2 postprocess.
            const int r = (row < M / 2) ? (row << 1) : (((M - row) << 1) - 1);
            const int c = (col < N / 2) ? (col << 1) : (((N - col) << 1) - 1);
            const T v = x[INDEX(row, col, N)] * scale;
            // Rows drawn from the mirrored (bottom) half flip sign,
            // matching the original per-quadrant switch (cases 0 and 1).
            y[INDEX(r, c, N)] = (row < M / 2) ? v : -v;
        }
    }
}
/// Launcher for the combined IDXST/IDCT CPU postprocess; all buffers are
/// real, so it forwards to the kernel unchanged.
template <typename T>
void idxst_idctPostprocessCpuLauncher(
    const T* x,
    T* y,
    const int M,
    const int N,
    int num_threads)
{
    idxst_idctPostprocessCpu<T>(x, y, M, N, num_threads);
}
DREAMPLACE_END_NAMESPACE
#endif
|
HSetMaintainer.h | #ifndef HSET_MAINTAINER_H
#define HSET_MAINTAINER_H
/*************************************************************
* Copyright: (C) 2012 by Markus Schordan *
* Author : Markus Schordan *
* License : see file LICENSE in the CodeThorn distribution *
*************************************************************/
#include <boost/unordered_set.hpp>
//#define HSET_MAINTAINER_DEBUG_MODE
/*!
* \author Markus Schordan
* \date 2012.
*/
/*!
 * Hash set that maintains a single heap-allocated representative for each
 * distinct element (hash-consing).  Elements are stored as KeyType* in the
 * underlying boost::unordered_set; HashFun and EqualToPred are expected to
 * hash/compare through the pointer.
 *
 * Thread-safety: determine() and the process() overloads guard the set with
 * `#pragma omp critical(HASHSET)`.  id(), numberOf(), maxCollisions(),
 * loadFactor(), and memorySize() are NOT synchronized -- do not call them
 * concurrently with insertions.
 *
 * \author Markus Schordan
 * \date 2012.
 */
template<typename KeyType,typename HashFun, typename EqualToPred>
class HSetMaintainer
  : public boost::unordered_set<KeyType*,HashFun,EqualToPred>
{
 public:
  //! <true,ptr>  : a new element was inserted;
  //! <false,ptr> : an equal element already existed.
  //! ptr is always the maintained (heap-allocated) representative.
  typedef std::pair<bool,const KeyType*> ProcessingResult;
  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  //! Default: the maintainer owns its elements and deletes them on destruction.
  HSetMaintainer() { _keepStatesDuringDeconstruction = false; }
  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  //! \param keepStates if true, the destructor does NOT delete the maintained
  //! elements (ownership remains with the caller).
  HSetMaintainer(bool keepStates) { _keepStatesDuringDeconstruction = keepStates; }
  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  //! Deletes all maintained elements unless keepStates was requested.
  virtual ~HSetMaintainer() {
    if (!_keepStatesDuringDeconstruction){
      typename HSetMaintainer::iterator i;
      for (i=this->begin(); i!=this->end(); ++i) {
        delete (*i);
      }
    }
  }
  //! True iff an element equal to s is already maintained (synchronized via
  //! determine()).
  bool exists(KeyType& s) {
    return determine(s)!=0;
  }
  //! Position of the element equal to s in the current iteration order.
  //! O(n): unordered_set iterators do not support operator'-', so the
  //! distance from begin() is counted by iteration.  Throws a const char*
  //! message if the element is unknown.  Not synchronized.
  size_t id(const KeyType& s) {
    typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator i;
    // BUGFIX: the set's key type is KeyType*, so the lookup must pass the
    // element's address (the previous `find(s)` passed the object itself,
    // which does not type-check for key type KeyType*).  This matches the
    // lookup in determine(const KeyType&).
    i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s));
    if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
      // in lack of operator '-' we compute the distance
      size_t pos=0;
      typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator b;
      b=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
      while(b!=i) {
        pos++;
        ++b;
      }
      return pos;
    }
    else
      throw "Error: unknown value. Maintainer cannot determine an id.";
  }
  // NOTE(review): unused legacy data member; retained only so the public
  // interface stays source-compatible with existing users.
  typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
  //! Returns the maintained representative equal to s, or 0 if none exists.
  //! Synchronized via omp critical(HASHSET).
  KeyType* determine(KeyType& s) {
    KeyType* ret=0;
    typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
    {
      i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(&s);
      if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
        ret=const_cast<KeyType*>(*i);
      } else {
        ret=0;
      }
    }
    return ret;
  }
  //! Const overload of determine(); returns 0 if no equal element exists.
  const KeyType* determine(const KeyType& s) {
    const KeyType* ret=0;
    typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
    {
      i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s));
      if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
        ret=const_cast<KeyType*>(*i);
      } else {
        ret=0;
      }
    }
    return ret;
  }
  //! Registers the (already heap-allocated) key: inserts it if no equal
  //! element is maintained yet; otherwise the existing representative is
  //! returned and the maintainer does NOT take ownership of `key`.
  ProcessingResult process(const KeyType* key) {
    ProcessingResult res2;
#pragma omp critical(HASHSET)
    {
      std::pair<typename HSetMaintainer::iterator, bool> res;
      typename HSetMaintainer::iterator iter=this->find(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
      if(iter!=this->end()) {
        // found it!
        res=std::make_pair(iter,false);
      } else {
        res=this->insert(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
      }
      res2=std::make_pair(res.second,*res.first);
    }
    return res2;
  }
  //! Convenience wrapper around process(const KeyType*) that drops the
  //! inserted/existing flag.
  const KeyType* processNewOrExisting(const KeyType* s) {
    ProcessingResult res=process(s);
    return res.second;
  }
  //! <true,const KeyType> if new element was inserted
  //! <false,const KeyType> if element already existed
  //! The by-value parameter is copied to the heap on first insertion.
  ProcessingResult process(KeyType key) {
    ProcessingResult res2;
#pragma omp critical(HASHSET)
    {
      std::pair<typename HSetMaintainer::iterator, bool> res;
      typename HSetMaintainer::iterator iter=this->find(&key);
      if(iter!=this->end()) {
        // found it!
        res=std::make_pair(iter,false);
      } else {
        // converting the stack allocated object to heap allocated
        // this copies the entire object
        // TODO: this can be avoided by providing a process function with a pointer arg
        // this requires a more detailed result: pointer exists, alternate pointer with equal object exists, does not exist
        KeyType* keyPtr=new KeyType();
        *keyPtr=key;
        res=this->insert(keyPtr);
        if (!res.second) {
          // this case should never occur, condition "iter!=this->end()" above would have been satisfied and
          // this else branch would have therefore been ignored
          std::cerr << "ERROR: HSetMaintainer: Element was not inserted even though it could not be found in the set." << std::endl;
          ROSE_ASSERT(0);
          delete keyPtr;
          keyPtr = NULL;
        }
      }
#ifdef HSET_MAINTAINER_DEBUG_MODE
      // Debug check: inserting an equal element twice must be idempotent.
      // BUGFIX: insert() takes KeyType* (previously a KeyType value was
      // passed), and the locals must not shadow the function-level res2.
      std::pair<typename HSetMaintainer::iterator, bool> dres1;
      dres1=this->insert(&key);
      std::pair<typename HSetMaintainer::iterator, bool> dres2;
      dres2=this->insert(&key);
      if(!(dres1==dres2)) {
        std::cerr<< "Error: HsetMaintainer failed:"<<std::endl;
        std::cerr<< "res1:"<<(*dres1.first)->toString()<<":"<<dres1.second<<std::endl;
        std::cerr<< "res2:"<<(*dres2.first)->toString()<<":"<<dres2.second<<std::endl;
        exit(1);
      }
      std::cerr << "HSET insert OK"<<std::endl;
#endif
      res2=std::make_pair(res.second,*res.first);
    }
    return res2;
  }
  //! Like processNewOrExisting(), but exits the program if the element was
  //! already maintained (i.e. it asserts that s is new).
  const KeyType* processNew(KeyType& s) {
    //std::pair<typename HSetMaintainer::iterator, bool> res=process(s);
    ProcessingResult res=process(s);
    if(res.first!=true) {
      std::cerr<< "Error: HsetMaintainer::processNew failed:"<<std::endl;
      std::cerr<< "res:";
      std::cout <<":"<<res.first<<std::endl;
      std::cout <<res.second->toString();
      exit(1);
    }
    return res.second;
  }
  //! Convenience wrapper around process(KeyType) that drops the flag.
  const KeyType* processNewOrExisting(KeyType& s) {
    ProcessingResult res=process(s);
    return res.second;
  }
  //! Number of maintained elements.  Not synchronized.
  long numberOf() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::size(); }
  //! Size of the largest hash bucket (a measure of collisions).  O(buckets).
  long maxCollisions() {
    size_t max=0;
    for(size_t i=0; i<HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_count();++i) {
      if(HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i)>max) {
        max=HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i);
      }
    }
    return max;
  }
  //! Current load factor of the underlying hash set.
  double loadFactor() {
    return HSetMaintainer<KeyType,HashFun,EqualToPred>::load_factor();
  }
  //! Approximate total memory footprint: sum of each element's
  //! memorySize() plus iterator/container overhead.  Requires
  //! KeyType::memorySize().
  long memorySize() const {
    long mem=0;
    for(typename HSetMaintainer<KeyType,HashFun,EqualToPred>::const_iterator i
          =HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
        i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end();
        ++i) {
      mem+=(*i)->memorySize();
      mem+=sizeof(*i);
    }
    return mem+sizeof(*this);
  }
 private:
  //const KeyType* ptr(KeyType& s) {}
  // If true, the destructor leaves the maintained elements alive.
  bool _keepStatesDuringDeconstruction;
};
#endif
|
GB_binop__land_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int16)
// A*D function (colscale): GB (_AxD__land_int16)
// D*A function (rowscale): GB (_DxB__land_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16)
// C=scalar+B GB (_bind1st__land_int16)
// C=scalar+B' GB (_bind1st_tran__land_int16)
// C=A+scalar GB (_bind2nd__land_int16)
// C=A'+scalar GB (_bind2nd_tran__land_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no accumulator and no mask.
// The whole computation comes from the included template, specialized by the
// GB_* macros defined above for the LAND (logical AND) int16 operator.
void GB (_Cdense_ewise3_noaccum__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C using the
// LAND int16 operator.  Parallelized over the precomputed slicing of B.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumB__land_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C using the LAND int16 operator.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumb__land_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first.
// Dead code in the generator output; harmless.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the LAND
// int16 operator.  Parallelized over the precomputed slicing of A.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AxD__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the LAND
// int16 operator.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_DxB__land_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the LAND int16 operator.
// When is_eWiseUnion is true, alpha/beta scalars (type-erased inputs) are
// used for entries present in only one of A or B.  The computation itself
// is the included GB_add_template.c, specialized by the GB_* macros above.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AaddB__land_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces declared here are released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse, using the LAND int16 operator.  The computation
// is the included GB_emult_08_meta.c, specialized by the GB_* macros above.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AemultB_08__land_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full, using the LAND int16 operator.  GB_BINOP_FLIP selects at
// compile time whether a runtime flipxy must be honored; LAND is commutative
// (GB_BINOP_FLIP is 0 above), so only the non-flipped path is compiled here.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AemultB_02__land_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, using the LAND int16 operator.  Parallelized over
// the precomputed slicing of M.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AemultB_04__land_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, using the
// LAND int16 operator.  The computation is the included bitmap template,
// specialized by the GB_* macros above.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AemultB_bitmap__land_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = ((x != 0) && (bij != 0)) to every entry of B,
// with the scalar bound to the first argument.  Entries absent from the
// bitmap Bb are skipped.  Cx and Bx may be aliased.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_bind1st__land_int16)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
int16_t x = (*((int16_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
    // only entries present in the bitmap are computed
    if (GBB (Bb, p))
    {
        int16_t bval = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bval != 0)) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = ((aij != 0) && (y != 0)) to every entry of A,
// with the scalar bound to the second argument.  Entries absent from the
// bitmap Ab are skipped.  Cx and Ax may be aliased.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_bind2nd__land_int16)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // only entries present in the bitmap are computed
    if (GBB (Ab, p))
    {
        int16_t aval = GBX (Ax, p, false) ;
        Cx [p] = ((aval != 0) && (y != 0)) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LAND int16 operator with the
// scalar bound to the first argument, via the GB_CAST_OP macro defined just
// above and the included GB_unop_transpose.c template.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_bind1st_tran__land_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LAND int16 operator with the
// scalar bound to the second argument, via the GB_CAST_OP macro defined just
// above and the included GB_unop_transpose.c template.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_bind2nd_tran__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval' values.
 *
 * NOTE: Y is normalized in place while carrying/borrowing microseconds, so
 * the caller's Y may be modified (same contract as the classic GNU libc
 * manual example this follows).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when X has fewer microseconds than Y. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry into the seconds field when the microsecond gap exceeds 1 second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization, tv_usec of the result is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) {
for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(16*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(16*t3+Nx+12,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
matc.h | /***********************************************************************
|
| MATC.H - Last Edited 7. 8. 1988
|
************************************************************************/
/*
* $Id: matc.h,v 1.2 2007/06/08 08:12:19 jpr Exp $
*
* $Log: matc.h,v $
* Revision 1.2 2007/06/08 08:12:19 jpr
* *** empty log message ***
*
* Revision 1.1 2005/05/27 12:26:22 vierinen
* changed header install location
*
* Revision 1.1.1.1 2005/04/14 13:29:14 vierinen
* initial matc automake package
*
* Revision 1.3 2001/06/08 09:20:29 jpr
* *** empty log message ***
*
* Revision 1.2 1998/08/01 12:34:49 jpr
*
* Added Id, started Log.
*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <setjmp.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/types.h>
#ifdef MODULE_MATC
#define EXT
#else
#define EXT extern
#endif
/*******************************************************************
LIST HANDLING DEFINITIONS
*******************************************************************/
typedef struct list {
struct list *next; /* pointer to next item in list */
char *name; /* name of list item */
} LIST;
/*
pointers to start of global lists
*/
#ifdef MODULE_MATC
#ifdef _OPENMP
/* Move initialization to matc.c::mtc_init() for thread safety */
EXT LIST * listheaders;
#pragma omp threadprivate(listheaders)
#else
EXT LIST listheaders[5] = {
{
NULL, "Allocations" /* memory allocations */
}, {
NULL, "Constants" /* global CONSTANTS */
}, {
NULL, "Currently defined VARIABLES" /* global VARIABLES */
}, {
NULL, "Builtin Functions" /* internal commands */
}, {
NULL, "User Functions" /* user defined functions */
}
};
#endif /* _OPENMP */
#else
#ifdef _OPENMP
/* Move initialization to matc.c::mtc_init() for thread safety */
EXT LIST * listheaders;
#pragma omp threadprivate(listheaders)
#else
EXT LIST listheaders[];
#endif /* _OPENMP */
#endif /* MODULE_MATC */
#define ALLOCATIONS 0
#define CONSTANTS 1
#define VARIABLES 2
#define COMMANDS 3
#define FUNCTIONS 4
#define MAX_HEADERS 5
#define ALLOC_HEAD listheaders[ALLOCATIONS].next
#define CONST_HEAD listheaders[CONSTANTS].next
#define VAR_HEAD listheaders[VARIABLES].next
#define COM_HEAD listheaders[COMMANDS].next
#define FUNC_HEAD listheaders[FUNCTIONS].next
#define NEXT(lst) (lst)->next
#define NAME(lst) (lst)->name
/*******************************************************************
MEMORY HANDLING
********************************************************************/
/*
memory allocation and deallocation routines
*/
#define ALLOCMEM(size) mem_alloc(size)
#define FREEMEM(ptr) mem_free(ptr)
/*
we use a lot of string copying.
*/
#define STRCOPY(str) strcpy((char *)ALLOCMEM(strlen(str)+1),(str))
typedef struct alloc_list {
struct alloc_list *next;
char *mem;
} ALLOC_LIST;
#define ALLOC_LST(mem) (ALLOC_LIST *)((char *)mem-sizeof(ALLOC_LIST))
#define ALLOC_PTR(lst) (char *)((char *)lst+sizeof(ALLOC_LIST))
/*******************************************************************
VARIABLES
*******************************************************************/
/*
 * MATC matrix is internally represented by this structure.
 * A reference-counted, dense row-major value container; strings are
 * presumably stored in the same double array (TODO confirm against
 * the string handling code in matc.c).
 */
typedef struct MATRIX
{
    int type,          /* TYPE_DOUBLE or TYPE_STRING */
        refcount,      /* reference count */
        nrow, ncol;    /* number of rows and columns */
    double *data;      /* pointer to double array */
} MATRIX;
/*
 * list of VARIABLES
 * Singly-linked list node binding a name to a (shared) MATRIX value.
 */
typedef struct variable
{
    struct variable *next;   /* pointer to next item in list */
    char *name;              /* name of the item */
    int changed;             /* change flag; semantics set by matc.c — TODO confirm */
    MATRIX *this;            /* NOTE(review): 'this' is a C++ keyword, so this
                                header cannot be included from C++ code */
} VARIABLE;
/*
shortcuts for accsessing structure MATRIX
*/
#define MATR(ptr) (ptr)->this->data
#define TYPE(ptr) (ptr)->this->type
#define NROW(ptr) (ptr)->this->nrow
#define NCOL(ptr) (ptr)->this->ncol
#define REFCNT(ptr) (ptr)->this->refcount
#define M(ptr,i,j) (ptr)->this->data[(i) * NCOL(ptr) + (j)]
#define VARIABLESIZE sizeof(VARIABLE)
#define MATRIXSIZE sizeof(MATRIX)
#define MATSIZE(ptr) NROW(ptr)*NCOL(ptr)*sizeof(double)
#define TYPE_DOUBLE 0
#define TYPE_COMPLEX 1 /* this is not */
#define TYPE_STRING 2
/*******************************************************************
INTERNAL COMMANDS AND USER FUNCTIONS
*******************************************************************/
typedef struct command
{
struct command *next; /* pointer to next item in list */
char *name; /* name of the item */
int flags, /* CMDFLAG_PW & CMDFLAG_CE */
minp, maxp; /* min. and max. no. of parameters */
VARIABLE *(*sub)(); /* function to execute */
char *help; /* help string... */
} COMMAND;
#define COMSIZE sizeof(COMMAND)
#define CMDFLAG_PW 1 /* element by element operation */
#define CMDFLAG_CE 2 /* command can be executed when
preprosessing if constant
arguments. */
/*******************************************************************
USER DEFINED FUNCTIONS
*******************************************************************/
typedef struct function
{
struct function *next; /* pointer to next function in list */
char *name, /* name of the function */
**parnames, /* function parameter names (if any) */
**exports, /* functions exported variables */
**imports, /* functions imported variables */
*help; /* functions help text */
int parcount; /* defined number of parameters */
struct clause *body; /* function body */
} FUNCTION;
#define FUNCSIZE sizeof(FUNCTION)
/*******************************************************************
MISC DEFINITONS FOR PARSER
*******************************************************************/
typedef enum symbols {
nullsym, leftpar, rightpar, indopen, indclose, power, times, ptimes, divide,
plus, minus, reduction, transpose, eq, neq, lt, gt, le, ge, and, or, not,
assignsym, apply, resize, vector, statemend, argsep, name, number, string,
funcsym, import, export, ifsym, thensym, elsesym, whilesym, forsym,
beginsym, endsym, breaksym, comment, systemcall
} SYMTYPE;
#ifdef MODULE_MATC
/*--------------------------------------------------------------------*/
SYMTYPE ssymbols[] = {
leftpar, rightpar, indopen, indclose, beginsym, endsym, power, times, ptimes,
divide, plus, minus, reduction, transpose, lt, gt, and, or, not,
assignsym, apply, resize, vector, statemend, argsep, comment, systemcall
};
char csymbols[] = {
'(', ')', '[', ']', '{', '}', '^', '*', '#', '/', '+', '-', '\?',
'\'', '<', '>', '&', '|', '~', '=', '@', '%', ':', ';', ',', '!', '$'
};
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
char *reswords[] = {
"function", "import", "export", "if", "then", "else", "while", "for",
"begin", "end", "break", NULL
};
SYMTYPE rsymbols[] = {
funcsym, import, export, ifsym, thensym, elsesym, whilesym, forsym,
beginsym, endsym, breaksym
};
/*--------------------------------------------------------------------*/
char *symchars = "`._";
#else
EXT SYMTYPE ssymbols[];
EXT char csymbols[];
EXT char *reswords[];
EXT SYMTYPE rsymbols[];
EXT char *symchars;
/* #pragma omp threadprivate(ssymbols, csymbols, reswords, rsymbols, symchars)
* TODO: this should be added when GCC 4.8 arrives if needed */
#endif
/*
dataentry for expression trees
*/
typedef struct treeentry
{
struct tree *args, /* parameters for functions */
*subs; /* indexes for VARIABLES, equation results */
int entrytype; /* type of entrydata */
union data_entry
{
char *s_data; /* function or VARIABLE names or string constants */
double d_data; /* numeric constant */
VARIABLE *c_data; /* real constant, with no name references */
MATRIX *(*v_data)(); /* function address (for builtin operations) */
} entrydata;
} TREEENTRY;
#define ETYPE_NAME 0
#define ETYPE_NUMBER 1
#define ETYPE_STRING 2
#define ETYPE_OPER 3
#define ETYPE_CONST 4
#define ETYPE_EQUAT 5
/*
* four leaf tree, isn't that odd
*/
typedef struct tree {
struct tree *next;
struct tree *link;
struct tree *left, *right;
TREEENTRY tentry;
} TREE;
/*
shortcuts for accsessing above structures
*/
#define SDATA(ptr) (ptr)->tentry.entrydata.s_data
#define DDATA(ptr) (ptr)->tentry.entrydata.d_data
#define CDATA(ptr) (ptr)->tentry.entrydata.c_data
#define VDATA(ptr) (ptr)->tentry.entrydata.v_data
#define ETYPE(ptr) (ptr)->tentry.entrytype
#define SUBS(ptr) (ptr)->tentry.subs
#define ARGS(ptr) (ptr)->tentry.args
#define LEFT(ptr) (ptr)->left
#define RIGHT(ptr) (ptr)->right
/*
this is an operations list. data can be one of
the following:
ifsym, elsesym, whilesym, forsym, assignsym, funcsym
every input line is compiled to this type of list,
and it is used to hold function bodies.
*/
typedef struct clause
{
struct clause *link;
struct clause *jmp;
TREE *this;
SYMTYPE data;
} CLAUSE;
#define LINK(ptr) (ptr)->link
/*******************************************************************
THIS AND THAT
*******************************************************************/
#ifdef sign
# undef sign
#endif
#ifdef max
# undef max
#endif
#ifdef min
# undef min
#endif
#ifdef abs
# undef abs
#endif
#define sign(x) ((x) > 0 ? 1 : ((x) < 0 ? -1 : 0))
#define max(x,y) ((x) > (y) ? (x) : (y))
#define min(x,y) ((x) > (y) ? (y) : (x))
#define abs(x) ((x) > 0 ? (x) : -(x))
#define FOREVER for(;;)
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
/*
promptmode flags:
PMODE_MAIN ===> MATC>
PMODE_BLOCK ===> ....>
*/
#define PMODE_MAIN "MATC> "
#define PMODE_BLOCK "....> "
#define PMODE_CONT "####> "
EXT FILE *math_in, *math_out, *math_err;
#pragma omp threadprivate(math_in, math_out, math_err)
/*
see doread(), error() in matc.c
*/
EXT jmp_buf *jmpbuf;
#pragma omp threadprivate(jmpbuf)
EXT int term;
#pragma omp threadprivate(term)
#ifdef VAX
struct desc
{
int length;
char *addr;
} ;
#endif
#define COMMENT '!' /* comment introducer */
#define SYSTEM '$' /* system()-call introducer */
#define STRING_OUTPUT
#ifdef STRING_OUTPUT
#ifdef MODULE_MATC
static char *math_out_str = NULL;
static int math_out_count;
#pragma omp threadprivate (math_out_str, math_out_count)
#endif
#endif
void mem_free_all(void);
#ifdef MODULE_MATC
static int math_out_allocated = 0;
#pragma omp threadprivate (math_out_allocated)
/*
 * error_matc - report a MATC error and abort the current evaluation.
 *
 * Formats the message (printf-style), prefixes it with "MATC ERROR: ",
 * appends it to the string output buffer (STRING_OUTPUT) or writes it to
 * math_err, frees all MATC-tracked allocations, and longjmps back to the
 * interpreter loop with value 2.  Never returns.
 *
 * Fixes over the original: the buffer was grown by a fixed 512 bytes and
 * written with unbounded vsprintf (overflow for messages >512 bytes), and
 * the realloc() result was never checked.  The message is now measured
 * with vsnprintf(NULL, 0, ...) first and the realloc result is validated;
 * on allocation failure the message falls back to stderr.
 */
void error_matc( char *format, ... )
{
    va_list args;

    va_start( args, format );
#ifdef STRING_OUTPUT
    {
        va_list measure;
        int msg_len;

        /* measure the formatted message (C99 vsnprintf) */
        va_copy( measure, args );
        msg_len = vsnprintf( NULL, 0, format, measure );
        va_end( measure );
        if ( msg_len < 0 )
            msg_len = 0;

        /* 13 = strlen("MATC ERROR: ") + terminating NUL */
        if ( math_out_count + msg_len + 13 > math_out_allocated )
        {
            int want = math_out_count + msg_len + 13 + 512;
            char *grown = (char *)realloc( math_out_str, want );

            if ( grown != NULL )
            {
                math_out_str = grown;
                math_out_allocated = want;
            }
            else
            {
                /* allocation failed: at least get the message out */
                fprintf( stderr, "MATC ERROR: " );
                vfprintf( stderr, format, args );
            }
        }
        if ( math_out_str != NULL &&
             math_out_count + msg_len + 13 <= math_out_allocated )
        {
            math_out_count += sprintf( &math_out_str[math_out_count], "MATC ERROR: " );
            math_out_count += vsprintf( &math_out_str[math_out_count], format, args );
        }
    }
#else
    fprintf( math_err, "MATC ERROR: " );
    vfprintf( math_err, format, args );
#endif
    va_end( args );
    (void)mem_free_all();
    longjmp( *jmpbuf, 2 );
}
/*
 * PrintOut - MATC's printf: append formatted text to the string output
 * buffer (STRING_OUTPUT) or write it to math_out.
 *
 * Fixes over the original: the buffer was grown by a fixed 512 bytes and
 * written with unbounded vsprintf (overflow for output >512 bytes), and
 * the realloc() result was never checked.  The output is now measured
 * with vsnprintf(NULL, 0, ...) first and realloc failure is tolerated
 * (the text is dropped rather than corrupting memory).
 */
void PrintOut( char *format, ... )
{
    va_list args;

    va_start( args, format );
#ifdef STRING_OUTPUT
    {
        va_list measure;
        int msg_len;

        /* measure the formatted output (C99 vsnprintf) */
        va_copy( measure, args );
        msg_len = vsnprintf( NULL, 0, format, measure );
        va_end( measure );
        if ( msg_len < 0 )
            msg_len = 0;

        if ( math_out_count + msg_len + 1 > math_out_allocated )
        {
            int want = math_out_count + msg_len + 1 + 512;
            char *grown = (char *)realloc( math_out_str, want );

            if ( grown != NULL )
            {
                math_out_str = grown;
                math_out_allocated = want;
            }
        }
        if ( math_out_str != NULL &&
             math_out_count + msg_len + 1 <= math_out_allocated )
        {
            math_out_count += vsprintf( &math_out_str[math_out_count], format, args );
        }
    }
#else
    vfprintf( math_out, format, args );
#endif
    va_end( args );
}
#else
extern void error_matc( char *format, ... );
extern void PrintOut( char *format, ... );
#endif
#define error error_matc
/*******************************************************************
function prototypes
*******************************************************************/
#include "fnames.h"
/*******************************************************************
graphics package defitions
*******************************************************************/
#include "gra.h"
|
pdf_fmt.c | /**
* Copyright (C) 2006 Henning Norén
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*
* Re-factored for JtR by Dhiru Kholia during June, 2011 for GSoC.
*
* References:
*
* http://www.adobe.com/devnet/pdf/pdf_reference.html
* http://www.cs.cmu.edu/~dst/Adobe/Gallery/anon21jul01-pdf-encryption.txt
* http://www.novapdf.com/kb/pdf-example-files-created-with-with-novapdf-138.html
*
* TODO: add support for detecting AESV2 and AESV3 encrypted documents
* lacking "trailer dictionary" to pdfparser.c */
#undef MEM_FREE
#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "pdfcrack.h"
#include "pdfparser.h"
#define FORMAT_LABEL "PDF"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 RC4 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static struct custom_salt *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int any_cracked;
static size_t cracked_size;
static struct fmt_tests pdf_tests[] = {
{"$pdf$Standard*badad1e86442699427116d3e5d5271bc80a27814fc5e80f815efeef839354c5f*289ece9b5ce451a5d7064693dab3badf101112131415161718191a1b1c1d1e1f*16*34b1b6e593787af681a9b63fa8bf563b*1*1*0*1*4*128*-4*3*2", "test"},
{"$pdf$Standard*d83a8ab680f144dfb2ff2334c206a6060779e007701ab881767f961aecda7984*a5ed4de7e078cb75dfdcd63e8da7a25800000000000000000000000000000000*16*06a7f710cf8dfafbd394540d40984ae2*1*1*0*1*4*128*-1028*3*2", "July2099"},
{"$pdf$Standard*2446dd5ed2e18b3ce1ac9b56733226018e3f5c2639051eb1c9b2b215b30bc820*fa3af175d761963c8449ee7015b7770800000000000000000000000000000000*16*12a4da1abe6b7a1ceb84610bad87236d*1*1*0*1*4*128*-1028*3*2", "WHATwhatWHERE?"},
{"$pdf$Standard*6a80a547b8b8b7636fcc5b322f1c63ce4b670c9b01f2aace09e48d85e1f19f83*e64eb62fc46be66e33571d50a29b464100000000000000000000000000000000*16*14a8c53ffa4a79b3ed9421ef15618420*1*1*0*1*4*128*-1028*3*2", "38r285a9"},
{NULL}
};
/*
 * One-time format initialization: size and allocate the per-candidate
 * key and result arrays.
 *
 * NOTE: the OpenMP scaling below is order-sensitive.  min_keys_per_crypt
 * is multiplied by the raw thread count, then omp_t is inflated by
 * OMP_SCALE, and only max_keys_per_crypt gets the larger factor.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;              /* batch multiplier to amortize OMP overhead */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* one PLAINTEXT_LENGTH+1 slot per candidate key */
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	/* per-candidate crack flags, zeroed by mem_calloc_tiny */
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_tiny(cracked_size, MEM_ALIGN_WORD);
}
/*
 * Return nonzero iff the NUL-terminated string q consists entirely of
 * hex digits (per the atoi16[] lookup table, where 0x7F marks non-hex).
 */
static int ishex(char *q)
{
	/* advance past every hex digit; a valid string stops at the NUL */
	for (; atoi16[ARCH_INDEX(*q)] != 0x7F; q++)
		;
	return *q == 0;
}
/*
 * Validate a "$pdf$Standard*..." ciphertext line.
 *
 * Returns 1 if the line has the expected '*'-separated fields with hex
 * strings where required, 0 otherwise.  Works on a strdup'd copy because
 * strtok() mutates its input.
 *
 * Fix: the fileIDLen field was checked with strncmp(ptr, "16", 2), which
 * also accepted values like "163"; get_salt() later uses this count to
 * fill the fixed-size fileID buffer, so the check is now an exact match.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, "$pdf$Standard*", 14))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;          /* keep original pointer for MEM_FREE */
	ctcopy += 14;
	if (!(ptr = strtok(ctcopy, "*"))) /* o_string */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* u_string */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* fileIDLen */
		goto error;
	if (strcmp(ptr, "16"))    /* exact match: only 16-byte file IDs supported */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* fileID */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encryptMetaData */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* work_with_user */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* have_userpassword */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* version_major */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* version_minor */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* length */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* permissions */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* revision */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* version */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Deserialize a "$pdf$..." ciphertext into a custom_salt and prime the
 * cracking engine.  Returns a pointer to a function-local static, so only
 * one salt is live at a time and the function is not reentrant.
 *
 * The field layout mirrors valid(); strtok() results are used unchecked
 * here because valid() is assumed to have vetted the line already.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;          /* original pointer, for MEM_FREE below */
	int i;
	char *p;
	static struct custom_salt cs;
	ctcopy += 5;	/* skip over "$pdf$" marker */
	memset(cs.encKeyWorkSpace, 0, 128);
	/* restore serialized data */
	/* NOTE(review): strncpy with count 32 leaves s_handler unterminated if
	 * the handler name is >= 32 chars — confirm the field size/contract */
	strncpy(cs.e.s_handler, strtok(ctcopy, "*"), 32);
	/* o_string: 32 bytes, decoded from 64 hex chars */
	p = strtok(NULL, "*");
	for (i = 0; i < 32; i++)
		cs.e.o_string[i] =
		    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
		    atoi16[ARCH_INDEX(p[i * 2 + 1])];
	/* u_string: 32 bytes, decoded from 64 hex chars */
	p = strtok(NULL, "*");
	for (i = 0; i < 32; i++)
		cs.e.u_string[i] =
		    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
		    atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.e.fileIDLen = atoi(p);       /* valid() restricts this to 16 */
	p = strtok(NULL, "*");
	for (i = 0; i < cs.e.fileIDLen; i++)
		cs.e.fileID[i] =
		    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
		    atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.e.encryptMetaData = atoi(p);
	p = strtok(NULL, "*");
	cs.e.work_with_user = atoi(p);
	p = strtok(NULL, "*");
	cs.e.have_userpassword = atoi(p);
	p = strtok(NULL, "*");
	cs.e.version_major = atoi(p);
	p = strtok(NULL, "*");
	cs.e.version_minor = atoi(p);
	p = strtok(NULL, "*");
	cs.e.length = atoi(p);
	p = strtok(NULL, "*");
	cs.e.permissions = atoi(p);
	p = strtok(NULL, "*");
	cs.e.revision = atoi(p);
	p = strtok(NULL, "*");
	cs.e.version = atoi(p);
	if (cs.e.have_userpassword)
		/* NOTE(review): this points into the strdup'd buffer that is
		 * freed just below — initPDFCrack() reads a dangling pointer
		 * when a user password is present; verify and fix upstream */
		cs.userpassword = (unsigned char *)strtok(NULL, "*");
	else
		cs.userpassword = NULL;
	cs.knownPassword = false;
	MEM_FREE(keeptr);
	/* try to initialize the cracking-engine */
	if (!initPDFCrack(&cs)) {
		fprintf(stderr, "Wrong userpassword, '%s'\n", cs.userpassword);
		exit(-1);
	}
	return (void *)&cs;
}
/*
 * Make `salt` the active salt for subsequent crypt_all() calls and
 * reload the cracking-engine state from it.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	loadPDFCrack(cur_salt);
}
/*
 * Store candidate password `key` in slot `index`, truncated to
 * PLAINTEXT_LENGTH bytes and always NUL-terminated.
 */
static void pdf_set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored (possibly truncated) candidate password at `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Run the PDF cracking engine over all `count` queued candidates for the
 * current salt, setting cracked[i] per candidate and any_cracked if any hit.
 *
 * Bug fix: in the original the `for` statement itself sat inside the
 * #ifdef _OPENMP block, so builds without OpenMP executed the loop body
 * exactly once (only index 0 was ever tested).  The loop now always runs;
 * only the pragmas are conditional.
 */
static void crypt_all(int count)
{
	int index = 0;

	/* lazily reset per-batch results from the previous salt/batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		/* do the actual crunching */
		cracked[index] = runCrack(saved_key[index], cur_salt);
		if (cracked[index] == 1)
#ifdef _OPENMP
#pragma omp critical
#endif
			any_cracked = 1;
	}
}
/* Batch-level check: nonzero iff any candidate in the last crypt_all()
 * batch cracked the current salt.  `binary` is unused (BINARY_SIZE is 0). */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate check: was candidate `index` a match in the last batch? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Final confirmation; relies on the cracked[] flag already set by
 * runCrack() in crypt_all(), so no extra work is needed here. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/*
 * Format descriptor registered with the John the Ripper core: static
 * parameters first, then the method table wired to the functions above
 * (defaults from formats.c fill the slots this format does not override).
 */
struct fmt_main fmt_pdf = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		DEFAULT_ALIGN,
		SALT_SIZE,
		DEFAULT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		pdf_tests
	},
	{ /* methods */
		init,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		pdf_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
GB_binop__max_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp32)
// A*D function (colscale): GB (_AxD__max_fp32)
// D*A function (rowscale): GB (_DxB__max_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp32)
// C=scalar+B GB (_bind1st__max_fp32)
// C=scalar+B' GB (_bind1st_tran__max_fp32)
// C=A+scalar GB (_bind2nd__max_fp32)
// C=A'+scalar GB (_bind2nd_tran__max_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = fmaxf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmaxf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Auto-generated kernel: the loop body comes from the included template,
// which expands via the GB_* macros defined above (GB_BINOP is fmaxf here).
void GB (_Cdense_ewise3_accum__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel: body supplied by the included template, expanded
// with the GB_* macros above (no accumulation variant).
void GB (_Cdense_ewise3_noaccum__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE);
// otherwise the included template does the work over the B slicing/tasks.
GrB_Info GB (_Cdense_accumB__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns; kept to mirror the
    // generated pattern used by the other kernels in this file
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Writes directly into C->x (typed as float); the included template
// iterates per the A slicing/tasks.
GrB_Info GB (_AxD__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above, scaling rows of B by the diagonal of D.
GrB_Info GB (_DxB__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// alpha/beta are only read for eWiseUnion (values used where A or B has no
// entry); for plain eWiseAdd they stay uninitialized and unused.
GrB_Info GB (_AaddB__max_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated dispatcher; all work happens in the included meta file.
GrB_Info GB (_AemultB_08__max_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for MAX (commutative), so only the non-flipped branch
// below is compiled for this operator; the flip branch is kept for the
// generator's shared shape.
GrB_Info GB (_AemultB_02__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated dispatcher; the included template iterates per the M
// slicing/tasks.
GrB_Info GB (_AemultB_04__max_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated dispatcher for the bitmap-result case.
GrB_Info GB (_AemultB_bitmap__max_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = fmaxf (x, Bx [k]) for every entry present in the bitmap Bb
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap are left untouched
        if (!GBB (Bb, k)) continue ;
        float bjk = GBX (Bx, k, false) ;
        Cx [k] = fmaxf (x, bjk) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = fmaxf (Ax [k], y) for every entry present in the bitmap Ab
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap are left untouched
        if (!GBB (Ab, k)) continue ;
        float ajk = GBX (Ax, k, false) ;
        Cx [k] = fmaxf (ajk, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (x, aij) ; \
}
// C = op (x, A'): the scalar x is bound as the first operand while the
// transpose is performed by the included GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__max_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for whatever follows; preprocessor directives are
// processed at compile time, so placement after the return is harmless
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (aij, y) ; \
}
// C = op (A', y): the scalar y is bound as the second operand while the
// transpose is performed by the included GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kernel_dilated.h | #pragma omp target teams distribute parallel for thread_limit(local_work_size)
for (int thread_id = 0; thread_id < global_work_size; thread_id++)
{
    // Decode this thread's output pixel: thread ids run down the columns
    // (column-major), so row = id % m and col = id / m.
    const int row = thread_id % max_gicov_m;
    const int col = thread_id / max_gicov_m;
    // Offsets of the structuring element's center
    const int center_r = strel_m / 2;
    const int center_c = strel_n / 2;
    // Running maximum of the GICOV scores under the structuring element
    float best = 0.0f;
    for (int sr = 0; sr < strel_m; sr++)
    {
        const int y = row - center_r + sr;
        // skip rows that fall off the top or bottom edge of the matrix
        if ((y < 0) || (y >= max_gicov_m))
            continue;
        for (int sc = 0; sc < strel_n; sc++)
        {
            const int x = col - center_c + sc;
            // skip out-of-range columns and zero structuring-element taps
            if ((x < 0) || (x >= max_gicov_n))
                continue;
            if (host_strel[(sr * strel_n) + sc] == 0)
                continue;
            const float candidate = host_gicov[(x * max_gicov_m) + y];
            if (candidate > best)
                best = candidate;
        }
    }
    // Store the maximum found.  Note the output is addressed row-major
    // (row * n + col), which is NOT the same linear index as thread_id.
    host_dilated[(row * max_gicov_n) + col] = best;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place while borrowing/carrying whole seconds, so the
 * caller's Y may be modified.  Returns 1 when the difference is negative,
 * 0 otherwise; RESULT always has a non-negative tv_usec.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push any excess microseconds (>= one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates the two time-level grids and the seven coefficient
 * grids, fills them with reproducible pseudo-random data, runs the 7-point
 * variable-coefficient stencil TESTS times, and reports each run's time.
 *
 * Usage: prog Nx Ny Nz [Nt]   (interior sizes; a 1-cell halo is added)
 *
 * Fixes vs. the original: MIN macro spelled correctly (the original called
 * an undefined lower-case `min`), the sizes default sanely instead of being
 * read uninitialized when arguments are missing, the halo planes are
 * initialized (the stencil reads index 0), and all allocations are freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults used when command-line arguments are absent. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;

  if (argc > 3) {
    /* +2 adds a one-cell boundary halo on each side of the domain */
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the two time-level grids A[0..1][Nz][Ny][Nx] */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* one coefficient grid per stencil point (center + 6 neighbors) */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for (m=0; m<7; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list
     length; the list is modified here before source-to-source
     transformations */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* initialize with reproducible pseudo-random data; start at 0 so the
     halo planes the stencil reads (i-1, j-1, k-1 at the low boundary)
     are defined */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* the macro defined at the top of this file is MIN, not min */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* free allocated arrays (including the top-level pointers and
     tile_size, which the original leaked) */
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread pixel rows allocated by AcquirePixelThreadSet().
  NULL slots are skipped, so a partially constructed set is safe to free.
  Always returns NULL, for convenient assignment at the call site.
*/
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (MagickPixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
  {
    if (pixels[i] != (MagickPixelPacket *) NULL)
      pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]);
  }
  pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/*
  Allocate one scratch row of MagickPixelPackets per OpenMP thread.  Each
  row holds max(image->columns, number_images) packets, all initialized
  from the image's pixel template.  Returns NULL on allocation failure,
  after releasing any rows already acquired.
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
  const size_t number_images)
{
  MagickPixelPacket
    **pixels;

  register ssize_t
    i,
    j;

  size_t
    length,
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  /* zero the slot table so a partial tear-down only frees valid rows */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  /* each row must hold a full image row or one packet per input image,
     whichever is larger (invariant across threads, so computed once) */
  length=image->columns;
  if (length < number_images)
    length=number_images;
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length,
      sizeof(**pixels));
    if (pixels[i] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (j=0; j < (ssize_t) length; j++)
      GetMagickPixelPacket(image,&pixels[i][j]);
  }
  return(pixels);
}
/* Return the larger of x and y; on a tie the shared value is returned. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator that orders MagickPixelPackets by decreasing
  intensity: brighter pixels sort first.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *color_1,
    *color_2;

  color_1=(const MagickPixelPacket *) x;
  color_2=(const MagickPixelPacket *) y;
  /* descending order: second intensity minus first */
  return((int) MagickPixelIntensity(color_2)-
    (int) MagickPixelIntensity(color_1));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Return the smaller of x and y; on a tie the shared value is returned. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  Apply a single evaluate operator to one quantum sample and return the raw
  (unclamped) result of op(pixel,value).  random_info supplies entropy for
  the *Noise operators; callers clamp the result to the quantum range.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
Quantum pixel,const MagickEvaluateOperator op,const MagickRealType value)
{
MagickRealType
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a
positive result. It differs from % or fmod() which returns a
'truncated modulus' result, where floor() is replaced by trunc()
and could return a negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
/* for the bitwise operators below, value is rounded to the nearest
   integer via the +0.5 before the cast */
case AndEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
/* a zero divisor is treated as 1.0 to avoid division by zero */
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) EvaluateMax((double) pixel,value);
break;
}
/* Mean and Median only accumulate pixel+value here; EvaluateImages()
   completes the reduction (divide by image count, or sort and pick the
   middle entry) */
case MeanEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
/*
  Apply op(pixel,value) to every channel of the image: a thin wrapper over
  EvaluateImageChannel() with the CompositeChannels channel mask.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
/*
  EvaluateImages() reduces a list of same-sized images to a single image by
  applying the evaluate operator across the list, pixel by pixel.  The
  median operator gathers one packet per input image at each (x,y), sorts
  by intensity, and keeps the middle entry; every other operator folds the
  images row by row (the first image is always added, then op is applied
  for the rest), with a final divide for mean and QuantumScale re-scaling
  for multiply.  Returns a new image, or NULL on failure.

  Fix vs. the original: in the median branch the CMYK index channel was
  stored with SetPixelIndex(evaluate_indexes+i,...), but after the inner
  loop i equals number_images — the row's index queue must be addressed by
  the column x.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view;

  const Image
    *next;

  Image
    *evaluate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **restrict evaluate_pixels,
    zero;

  RandomInfo
    **restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

  /*
    Ensure the images are all the same size.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    if ((next->columns != images->columns) || (next->rows != images->rows))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "ImageWidthsOrHeightsDiffer","`%s'",images->filename);
        return((Image *) NULL);
      }
  /*
    Initialize the result image and the per-thread scratch rows.
  */
  evaluate_image=CloneImage(images,images->columns,images->rows,MagickTrue,
    exception);
  if (evaluate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(evaluate_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&evaluate_image->exception);
      evaluate_image=DestroyImage(evaluate_image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images,number_images);
  if (evaluate_pixels == (MagickPixelPacket **) NULL)
    {
      evaluate_image=DestroyImage(evaluate_image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireCacheView(evaluate_image);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
      for (y=0; y < (ssize_t) evaluate_image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *restrict evaluate_indexes;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
          evaluate_image->columns,1,exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) evaluate_image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) number_images; i++)
            evaluate_pixel[i]=zero;
          next=images;
          for (i=0; i < (ssize_t) number_images; i++)
          {
            register const IndexPacket
              *indexes;

            register const PixelPacket
              *p;

            image_view=AcquireCacheView(next);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const PixelPacket *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),op,evaluate_pixel[i].red);
            evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),op,evaluate_pixel[i].green);
            evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),op,evaluate_pixel[i].blue);
            evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelOpacity(p),op,evaluate_pixel[i].opacity);
            if (evaluate_image->colorspace == CMYKColorspace)
              evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
                *indexes,op,evaluate_pixel[i].index);
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          /*
            After the loop i == number_images, so evaluate_pixel[i/2] is the
            median entry of the intensity-sorted list.
          */
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
          if (evaluate_image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(
              evaluate_pixel[i/2].opacity));
          else
            SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
          if (evaluate_image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[i/2].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              evaluate_image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
      for (y=0; y < (ssize_t) evaluate_image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *restrict evaluate_indexes;

        register ssize_t
          i,
          x;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
          evaluate_image->columns,1,exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) evaluate_image->columns; x++)
          evaluate_pixel[x]=zero;
        next=images;
        for (i=0; i < (ssize_t) number_images; i++)
        {
          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          image_view=AcquireCacheView(next);
          p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
          if (p == (const PixelPacket *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (x=0; x < (ssize_t) next->columns; x++)
          {
            /* the first image seeds the accumulator (Add), then op folds */
            evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].red);
            evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].green);
            evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].blue);
            evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelOpacity(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].opacity);
            if (evaluate_image->colorspace == CMYKColorspace)
              evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
                GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
                evaluate_pixel[x].index);
            p++;
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        if (op == MeanEvaluateOperator)
          for (x=0; x < (ssize_t) evaluate_image->columns; x++)
          {
            evaluate_pixel[x].red/=number_images;
            evaluate_pixel[x].green/=number_images;
            evaluate_pixel[x].blue/=number_images;
            evaluate_pixel[x].opacity/=number_images;
            evaluate_pixel[x].index/=number_images;
          }
        if (op == MultiplyEvaluateOperator)
          for (x=0; x < (ssize_t) evaluate_image->columns; x++)
          {
            register ssize_t
              j;

            /* undo the extra QuantumRange factor per extra multiply */
            for (j=0; j < (ssize_t) (number_images-1); j++)
            {
              evaluate_pixel[x].red*=QuantumScale;
              evaluate_pixel[x].green*=QuantumScale;
              evaluate_pixel[x].blue*=QuantumScale;
              evaluate_pixel[x].opacity*=QuantumScale;
              evaluate_pixel[x].index*=QuantumScale;
            }
          }
        for (x=0; x < (ssize_t) evaluate_image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
          if (evaluate_image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[x].opacity));
          else
            SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
          if (evaluate_image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[x].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              evaluate_image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    evaluate_image=DestroyImage(evaluate_image);
  return(evaluate_image);
}
/*
  Apply op(pixel,value) in place to the selected channels of image, one row
  per OpenMP task, clamping each result to the quantum range.  Returns
  MagickTrue on success; on a cache failure the remaining rows are skipped
  and MagickFalse is returned.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
const ChannelType channel,const MagickEvaluateOperator op,const double value,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**restrict random_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* a failure in another row aborts the remaining work */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelRed(q),op,value)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelGreen(q),op,value)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelBlue(q),op,value)));
if ((channel & OpacityChannel) != 0)
{
/* without a matte channel the opacity value is operated on directly;
   with one, the operator is applied to the alpha interpretation */
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelOpacity(q),op,value)));
else
SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],(Quantum) GetPixelAlpha(q),op,value)));
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelIndex(indexes+x),op,value)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Apply the selected channel function to one quantum sample, using up to
  four parameters (missing parameters take documented defaults), and return
  the result clamped to the quantum range.  exception is currently unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
MagickRealType
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
* Polynomial (evaluated by Horner's scheme)
* Parameters: polynomial constants, highest to lowest order
* For example: c0*x^3 + c1*x^2 + c2*x + c3
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel + parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
/* Sinusoid Function
* Parameters: Freq, Phase, Ampl, bias
*/
double freq,phase,ampl,bias;
freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
(freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
break;
}
case ArcsinFunction:
{
/* Arcsin Function (pegged at range limits for invalid results)
* Parameters: Width, Center, Range, Bias
*/
double width,range,center,bias;
width = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result = 2.0/width*(QuantumScale*pixel - center);
if ( result <= -1.0 )
result = bias - range/2.0;
else if ( result >= 1.0 )
result = bias + range/2.0;
else
result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
result *= QuantumRange;
break;
}
case ArctanFunction:
{
/* Arctan Function
* Parameters: Slope, Center, Range, Bias
*/
double slope,range,center,bias;
slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
result) + bias ) );
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
/*
  FunctionImage() is the convenience wrapper that applies the channel
  function to all composite channels of the image.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Apply the channel function in place to every selected channel of every
    pixel; rows are processed in parallel when OpenMP is available.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Pixels are rewritten in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /* Once any row fails, remaining rows are skipped (no early exit in
       an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /* With a matte channel operate on alpha; otherwise on the raw
             opacity value. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FunctionImageChannel)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() is the convenience wrapper that measures the extrema
  over all composite channels.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception);
  return(status);
}
/*
  GetImageChannelExtrema() rounds the floating-point channel range to
  integral extrema: the minimum is rounded down-to-nearest, the maximum
  up-to-nearest.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() is the convenience wrapper that measures the mean and
  standard deviation over all composite channels.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
/*
  GetImageChannelMean() computes the mean and standard deviation over the
  channels selected by the channel mask.  Per-channel results are combined
  by averaging the means and pooling the variances (per channel,
  variance - mean*mean == E[x^2]-E[x]^2).

  Returns MagickTrue on success, MagickFalse if the per-channel statistics
  could not be computed.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].variance-
        channel_statistics[RedChannel].mean*
        channel_statistics[RedChannel].mean;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].variance-
        channel_statistics[GreenChannel].mean*
        channel_statistics[GreenChannel].mean;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].variance-
        channel_statistics[BlueChannel].mean*
        channel_statistics[BlueChannel].mean;
      channels++;
    }
  /* Opacity only contributes when the image actually has a matte channel. */
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[OpacityChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].variance-
        channel_statistics[OpacityChannel].mean*
        channel_statistics[OpacityChannel].mean;
      channels++;
    }
  /* The index channel is the black channel, meaningful only for CMYK. */
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].variance-
        channel_statistics[BlackChannel].mean*
        channel_statistics[BlackChannel].mean;
      channels++;
    }
  /*
    Guard against division by zero when the mask selects no usable channel
    (e.g. OpacityChannel on an image without a matte): report 0.0 rather
    than NaN.
  */
  if (channels == 0)
    channels=1;
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].standard_deviation=
    sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() is the convenience wrapper that measures kurtosis and
  skewness over all composite channels.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;

  ssize_t
    y;

  /*
    Accumulate the first four power sums over the selected channels, then
    derive the excess kurtosis and the skewness from the raw moments.
    `area` counts the number of samples (one per selected channel per
    pixel).  Returns MagickFalse if any row could not be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      /*
        NOTE(review): unlike GetImageChannelMean(), opacity is sampled here
        even when image->matte is MagickFalse -- confirm this asymmetry is
        intended.
      */
      if ((channel & OpacityChannel) != 0)
        {
          mean+=GetPixelOpacity(p);
          sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p);
          sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
          sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p)*GetPixelOpacity(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          mean+=GetPixelIndex(indexes+x);
          sum_squares+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_fourth_power+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          area++;
        }
      p++;
    }
  }
  /* A row failed to read: the loop exited early. */
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      /* Convert power sums into raw moments (divide by sample count). */
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      /*
        Central moments from raw moments:
          kurtosis = (m4-4*mean*m3+6*mean^2*m2-3*mean^4)/sigma^4 - 3 (excess),
          skewness = (m3-3*mean*m2+2*mean^3)/sigma^3.
      */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageRange() is the convenience wrapper that measures the value range
  over all composite channels.
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,
    exception);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  ssize_t
    y;

  /*
    Scan every pixel and record the smallest and largest value found in the
    selected channels.  Returns MagickFalse if any row could not be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Seed with sentinels so any real pixel value replaces them. */
  *maxima=(-1.0E-37);
  *minima=1.0E+37;
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < *minima)
            *minima=(double) pixel.red;
          if (pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < *minima)
            *minima=(double) pixel.green;
          if (pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if (pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      /* Opacity participates only when the image has a matte channel. */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          if (pixel.opacity < *minima)
            *minima=(double) pixel.opacity;
          if (pixel.opacity > *maxima)
            *maxima=(double) pixel.opacity;
        }
      /* The index channel is meaningful only for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) GetPixelIndex(indexes+x) < *minima)
            *minima=(double) GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > *maxima)
            *maxima=(double) GetPixelIndex(indexes+x);
        }
      p++;
    }
  }
  /* MagickFalse when the row loop exited early on a read failure. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    channels,
    depth,
    length;

  ssize_t
    y;

  /*
    Compute per-channel statistics (depth, minima, maxima, mean, standard
    deviation, kurtosis, skewness) plus a combined CompositeChannels
    entry.  The caller owns the returned buffer and releases it with
    RelinquishMagickMemory().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
    sizeof(*channel_statistics));
  if (channel_statistics == (ChannelStatistics *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_statistics,0,length*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    /* Sentinel extrema so any real sample replaces them. */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-1.0E-37);
    channel_statistics[i].minima=1.0E+37;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    /*
      x is advanced only at the bottom of the loop body: whenever a
      channel's depth estimate proves too small for the current pixel the
      depth is incremented and the same pixel is re-tested via `continue`.
    */
    for (x=0; x < (ssize_t) image->columns; )
    {
      if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          /*
            The depth is sufficient once round-tripping the value through
            `depth` bits reproduces it exactly.
          */
          depth=channel_statistics[RedChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelRed(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[RedChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[GreenChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelGreen(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[GreenChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[BlueChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelBlue(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[BlueChannel].depth++;
              continue;
            }
        }
      if (image->matte != MagickFalse)
        {
          if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[OpacityChannel].depth;
              range=GetQuantumRange(depth);
              status=GetPixelOpacity(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
                GetPixelOpacity(p),range),range) ? MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[OpacityChannel].depth++;
                  continue;
                }
            }
        }
      if (image->colorspace == CMYKColorspace)
        {
          if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[BlackChannel].depth;
              range=GetQuantumRange(depth);
              status=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
                ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range) ?
                MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[BlackChannel].depth++;
                  continue;
                }
            }
        }
      /* Accumulate extrema and the first four power sums per channel. */
      if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
        channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
      if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
        channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
      channel_statistics[RedChannel].sum+=GetPixelRed(p);
      channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
        GetPixelRed(p);
      channel_statistics[RedChannel].sum_cubed+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      channel_statistics[RedChannel].sum_fourth_power+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
        channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
      if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
        channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
      channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
        GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
        GetPixelGreen(p)*GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_fourth_power+=(double)
        GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
      if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
        channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
      if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
        channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
      channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
        GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
        GetPixelBlue(p)*GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_fourth_power+=(double)
        GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
      if (image->matte != MagickFalse)
        {
          if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima)
            channel_statistics[OpacityChannel].minima=(double)
              GetPixelOpacity(p);
          if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima)
            channel_statistics[OpacityChannel].maxima=(double)
              GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_squared+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_cubed+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_fourth_power+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
        }
      if (image->colorspace == CMYKColorspace)
        {
          if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
            channel_statistics[BlackChannel].minima=(double)
              GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
            channel_statistics[BlackChannel].maxima=(double)
              GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_squared+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_cubed+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_fourth_power+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
        }
      x++;
      p++;
    }
  }
  /*
    NOTE(review): for an empty (0x0) image area is 0.0 and the divisions
    below would yield NaN -- confirm callers never pass an empty image.
  */
  area=(double) image->columns*image->rows;
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    /*
      Convert power sums to raw moments; at this point `variance` holds
      E[x^2] so the standard deviation is sqrt(E[x^2]-E[x]^2).
    */
    channel_statistics[i].sum/=area;
    channel_statistics[i].sum_squared/=area;
    channel_statistics[i].sum_cubed/=area;
    channel_statistics[i].sum_fourth_power/=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    channel_statistics[i].standard_deviation=sqrt(
      channel_statistics[i].variance-(channel_statistics[i].mean*
      channel_statistics[i].mean));
  }
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    /* Fold each channel into the aggregate CompositeChannels entry. */
    channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
      channel_statistics[CompositeChannels].depth,(double)
      channel_statistics[i].depth);
    channel_statistics[CompositeChannels].minima=MagickMin(
      channel_statistics[CompositeChannels].minima,
      channel_statistics[i].minima);
    channel_statistics[CompositeChannels].maxima=EvaluateMax(
      channel_statistics[CompositeChannels].maxima,
      channel_statistics[i].maxima);
    channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
    channel_statistics[CompositeChannels].sum_squared+=
      channel_statistics[i].sum_squared;
    channel_statistics[CompositeChannels].sum_cubed+=
      channel_statistics[i].sum_cubed;
    channel_statistics[CompositeChannels].sum_fourth_power+=
      channel_statistics[i].sum_fourth_power;
    channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
    channel_statistics[CompositeChannels].variance+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
    channel_statistics[CompositeChannels].standard_deviation+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
  }
  /* Average the composite entry over the channels the image actually has. */
  channels=3;
  if (image->matte != MagickFalse)
    channels++;
  if (image->colorspace == CMYKColorspace)
    channels++;
  channel_statistics[CompositeChannels].sum/=channels;
  channel_statistics[CompositeChannels].sum_squared/=channels;
  channel_statistics[CompositeChannels].sum_cubed/=channels;
  channel_statistics[CompositeChannels].sum_fourth_power/=channels;
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].variance/=channels;
  channel_statistics[CompositeChannels].standard_deviation=
    sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
  channel_statistics[CompositeChannels].kurtosis/=channels;
  channel_statistics[CompositeChannels].skewness/=channels;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    /* Constant channels have no defined skewness/kurtosis; leave 0.0. */
    if (channel_statistics[i].standard_deviation == 0.0)
      continue;
    /* Skewness and excess kurtosis from the raw moments. */
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
      3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
      2.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
      4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
      6.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation)-3.0;
  }
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5

/*
  A node in a skip-list histogram of 16-bit channel values.  The node's
  array index is the value itself; `count` is the number of samples with
  that value, `next` holds up to 9 forward links (one per level), and
  `signature` marks the node as in use for the current list instance.
*/
typedef struct _ListNode
{
  size_t
    next[9],
    count,
    signature;
} ListNode;

/*
  One skip-list: `level` is the highest link level currently in use and
  `nodes` the node array (the extra entry at index 65536 is the sentinel
  head node — see AcquirePixelList()/AddNodePixelList()).
*/
typedef struct _SkipList
{
  ssize_t
    level;

  ListNode
    *nodes;
} SkipList;

/*
  Per-thread pixel neighborhood: one skip-list for each of the
  ListChannels channels.  `length` is the neighborhood pixel count
  (width*height) and `seed` drives the pseudo-random level generator.
*/
typedef struct _PixelList
{
  size_t
    length,
    seed,
    signature;

  SkipList
    lists[ListChannels];
} PixelList;
/*
  DestroyPixelList() releases a pixel list together with its per-channel
  skip-list node arrays; a NULL argument is tolerated.  Always returns
  NULL so callers can clear their pointer in one statement.
*/
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  register ssize_t
    channel;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *) RelinquishMagickMemory(
      pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
/*
  DestroyPixelListThreadSet() releases the per-thread array of pixel
  lists and every list it contains.  Always returns NULL.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    thread;

  assert(pixel_list != (PixelList **) NULL);
  for (thread=0; thread < (ssize_t) GetOpenMPMaximumThreads(); thread++)
  {
    if (pixel_list[thread] == (PixelList *) NULL)
      continue;
    pixel_list[thread]=DestroyPixelList(pixel_list[thread]);
  }
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
/*
  AcquirePixelList() allocates a pixel list for a width-by-height
  neighborhood with one zero-initialized 65537-entry skip-list per
  channel (the extra node, index 65536, is the sentinel head).  Returns
  NULL on allocation failure, releasing anything already acquired.
*/
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  register ssize_t
    channel;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireQuantumMemory(65537UL,
      sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,65537UL*
      sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}
/*
  AcquirePixelListThreadSet() allocates one pixel list per OpenMP thread.
  On any failure everything allocated so far is released and NULL is
  returned (the array is zeroed first so partial cleanup is safe).
*/
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    thread;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    pixel_list[thread]=AcquirePixelList(width,height);
    if (pixel_list[thread] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Insert `color` as a new node in the channel's skip-list (the node's
    index is the value itself; 65536 is the sentinel head).
    Initialize the node.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list: record, for each level, the
    last node whose value precedes `color`.
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (linear-congruential
    sequence; each iteration continues with probability 1/4).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  /* Cap at the 9 forward links a node can hold, and never jump more than
     two levels above the list's current height. */
  if (level > 8)
    level=8;
  if (level > (list->level+2))
    level=list->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list at every level up to its own.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  Set `pixel' to the per-channel maximum of the samples stored in the pixel
  list.  Walks level 0 of each channel's skip-list (colors are linked in
  ascending order from sentinel node 65536) until all pixel_list->length
  samples have been counted.
*/
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color,
    maximum;
  ssize_t
    count;
  unsigned short
    channels[ListChannels];
  /*
    Find the maximum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    /* Seed the maximum with the first (smallest) stored color. */
    maximum=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      if (color > maximum)
        maximum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  MagickRealType
    total;

  register SkipList
    *skip_list;

  register ssize_t
    i;

  size_t
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Average the samples of each channel by walking level 0 of its skip-list,
    weighting every color by its occurrence count.
  */
  for (i=0; i < 5; i++)
  {
    skip_list=pixel_list->lists+i;
    node=65536L;
    tally=0;
    total=0.0;
    do
    {
      node=skip_list->nodes[node].next[0];
      total+=(MagickRealType) skip_list->nodes[node].count*node;
      tally+=skip_list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    channels[i]=(unsigned short) total;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Set `pixel' to the per-channel median of the stored samples: walk level 0
  of each skip-list accumulating counts until more than half of the
  pixel_list->length samples are covered; the color at that point is the
  median.
*/
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color;
  ssize_t
    count;
  unsigned short
    channels[ListChannels];
  /*
    Find the median value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) color;
  }
  /*
    NOTE(review): only this getter re-initializes *pixel here; the other
    Get*PixelList functions rely on the caller's GetMagickPixelPacket() --
    confirm the extra reset is intentional.
  */
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *skip_list;

  register ssize_t
    i;

  size_t
    node,
    smallest;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Pick the smallest stored color of each channel by walking level 0 of its
    skip-list until every sample has been accounted for.
  */
  for (i=0; i < 5; i++)
  {
    skip_list=pixel_list->lists+i;
    tally=0;
    node=65536UL;
    smallest=skip_list->nodes[node].next[0];
    do
    {
      node=skip_list->nodes[node].next[0];
      if (node < smallest)
        smallest=node;
      tally+=skip_list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[i]=(unsigned short) smallest;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Set `pixel' to the per-channel mode (most frequent color) of the stored
  samples.  Ties keep the first color encountered, i.e. the smallest, since
  level 0 links colors in ascending order.
*/
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];  /* was a literal 5; use ListChannels like the
                                other Get*PixelList() helpers */

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    mode=color;
    max_count=list->nodes[mode].count;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
      {
        mode=color;
        max_count=list->nodes[mode].count;
      }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Set `pixel' to a "non-peak" value per channel: walk to the median color,
  then step to its neighbor (successor or predecessor) when exactly one of
  them is a real color rather than the sentinel node 65536.
*/
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];  /* was a literal 5; use ListChannels like the
                                other Get*PixelList() helpers */

  /*
    Finds the non peak value for each of the colors.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    do
    {
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Set `pixel' to the per-channel standard deviation of the stored samples,
  computed as sqrt(E[x^2] - E[x]^2) over one pass of level 0 of each
  channel's skip-list.
*/
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        Accumulate count copies of color^2 with one multiply instead of the
        former O(count) inner loop that added color*color count times.
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Record one pixel's channel samples (scaled to 16 bits) in the pixel list.
  If a color node already carries the current signature it was added this
  round, so only its count is incremented; otherwise a fresh node is linked
  in via AddNodePixelList().
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;
  unsigned short
    index;
  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  /*
    NOTE(review): when the colorspace is not CMYK, `index' still holds the
    opacity sample from above and is inserted into list 4 anyway -- confirm
    this fallback is intended.
  */
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
/*
  Return the magnitude of x.
*/
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  return(x < 0 ? -x : x);
}
static void ResetPixelList(PixelList *pixel_list)
{
  int
    depth;

  register ListNode
    *sentinel;

  register SkipList
    *skip_list;

  register ssize_t
    i;

  /*
    Empty every channel's skip-list by pointing the sentinel node (65536) at
    itself on all levels, then bump the signature so stale nodes from the
    previous neighborhood are ignored.
  */
  for (i=0; i < 5; i++)
  {
    skip_list=pixel_list->lists+i;
    sentinel=skip_list->nodes+65536UL;
    skip_list->level=0;
    for (depth=0; depth < 9; depth++)
      sentinel->next[depth]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
/*
  Convenience wrapper: apply the requested statistic to the default channel
  set.  See StatisticImageChannel() for the real work.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  Replace each pixel of a cloned image with a statistic (min, max, mean,
  median, mode, gradient, non-peak, or standard deviation) of its
  width x height neighborhood.  Rows are processed in parallel; each OpenMP
  thread owns its own PixelList histogram.  Returns the new image or NULL
  on failure (the exception carries the reason).
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
  CacheView
    *image_view,
    *statistic_view;
  Image
    *statistic_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelList
    **restrict pixel_list;
  ssize_t
    neighbor_height,
    neighbor_width;
  ssize_t
    y;
  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /* A zero width/height falls back to an "optimal" kernel size. */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  /* One skip-list histogram set per OpenMP thread. */
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  statistic_view=AcquireCacheView(statistic_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict statistic_indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Fetch a virtual window wide enough for the neighborhood overhang on
      both sides of the row.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;
      register const IndexPacket
        *restrict s;
      register const PixelPacket
        *restrict r;
      register ssize_t
        u,
        v;
      r=p;
      s=indexes+x;
      /* Rebuild the histogram from scratch for this neighborhood. */
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        /* Advance one padded row within the virtual window. */
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      GetMagickPixelPacket(image,&pixel);
      /*
        NOTE(review): the center-pixel offsets below look suspect -- the
        pixel offset omits +x while the index offset includes it; confirm
        against upstream before relying on GradientStatistic fallthrough
        values.
      */
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+
        neighbor_width*neighbor_height/2+x,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;
          /* Gradient = per-channel |max - min| of the neighborhood. */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      /* Only write back the channels the caller asked for. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  return(statistic_image);
}
|
stream_int_omp.c | /*-----------------------------------------------------------------------*/
/* Program: Stream */
/* Revision: $Id: stream_omp.c,v 5.4 2009/02/19 13:57:12 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2003: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/* INSTRUCTIONS:
*
* 1) Stream requires a good bit of memory to run. Adjust the
* value of 'N' (below) to give a 'timing calibration' of
* at least 20 clock-ticks. This will provide rate estimates
* that should be good to about 5% precision.
*/
# define NN 20000
# define NTIMES 10
# define OFFSET 0
/*
* 3) Compile the code with full optimization. Many compilers
* generate unreasonably bad code before the optimizer tightens
* things up. If the results are unreasonably good, on the
* other hand, the optimizer might be too smart for me!
*
* Try compiling with:
* cc -O stream_omp.c -o stream_omp
*
* This is known to work on Cray, SGI, IBM, and Sun machines.
*
*
* 4) Mail the results to mccalpin@cs.virginia.edu
* Be sure to include:
* a) computer hardware model number and software revision
* b) the compiler flags
* c) all of the output from the test case.
* Thanks!
*
*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MYMIN
# define MYMIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MYMAX
# define MYMAX(x,y) ((x)>(y)?(x):(y))
# endif
static long long int a[NN+OFFSET],
b[NN+OFFSET],
c[NN+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(long long int) * NN,
2 * sizeof(long long int) * NN,
3 * sizeof(long long int) * NN,
3 * sizeof(long long int) * NN
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(long long int scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(long long int scalar);
#endif
/*
 * STREAM driver: initializes the a/b/c arrays, calibrates the timer, then
 * runs the four kernels (Copy, Scale, Add, Triad) NTIMES times on an OpenMP
 * target device and reports best/avg/worst bandwidth per kernel.
 * Returns 0 on completion; validation results are printed, not returned.
 */
int
app_main()
{
    int quantum, checktick();
    int BytesPerWord;
    register int j, k;
    long long int scalar;
    double t, times[4][NTIMES];
    /* --- SETUP --- determine precision and check timing --- */
    printf(HLINE);
    BytesPerWord = sizeof(long long int);
    printf("This system uses %d bytes per LONG LONG INT PRECISION word.\n",
    BytesPerWord);
    printf(HLINE);
    printf("Array size = %d, Offset = %d\n" , NN, OFFSET);
    printf("Total memory required = %.1f MB.\n",
    (3.0 * BytesPerWord) * ( (double) NN / 1048576.0));
    printf("Each test is run %d times, but only\n", NTIMES);
    printf("the *best* time for each is used.\n");
#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel private(k)
    {
    k = omp_get_num_threads();
    printf ("Number of Threads requested = %i\n",k);
    }
#endif
    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<NN; j++) {
    a[j] = 1;
    b[j] = 2;
    c[j] = 0;
    }
    printf(HLINE);
    /*
     * NOTE(review): if checktick() returns 0, the (t/quantum) report below
     * divides by zero -- the guard only changes the message, not the math.
     */
    if  ( (quantum = checktick()) >= 1)
    printf("Your clock granularity/precision appears to be "
    "%d microseconds.\n", quantum);
    else
    printf("Your clock granularity appears to be "
    "less than one microsecond.\n");
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < NN; j++)
    a[j] = 2 * a[j];
    t = 1.0E6 * (mysecond() - t);
    printf("Each test below will take on the order"
    " of %d microseconds.\n", (int) t  );
    printf("   (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");
    printf(HLINE);
    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);
    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    scalar = 3;
    /* Work is split statically across `unitCount' teams on the device. */
    int unitCount = 8;
#define THREADSPERUNIT 32
    int itersPerUnit = NN/unitCount;
    for (k=0; k<NTIMES; k++)
    {
    times[0][k] = mysecond();
#pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT)
    for (j=0; j<NN; j++)
        c[j] = a[j];
    times[0][k] = mysecond() - times[0][k];
    times[1][k] = mysecond();
    /*
     * NOTE(review): this printf (and the ones below) execute inside the
     * timing window of the *next* kernel, inflating its measured time.
     */
    printf("finished loop %d iter %d\n", 1, k);
#pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT)
    for (j=0; j<NN; j++)
        b[j] = scalar*c[j];
    printf("finished loop %d iter %d\n", 2, k);
    times[1][k] = mysecond() - times[1][k];
    times[2][k] = mysecond();
#pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT)
    for (j=0; j<NN; j++)
        c[j] = a[j]+b[j];
    printf("finished loop %d iter %d\n", 3, k);
    times[2][k] = mysecond() - times[2][k];
    times[3][k] = mysecond();
#pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT)
    for (j=0; j<NN; j++)
        a[j] = b[j]+scalar*c[j];
    printf("finished loop %d iter %d\n", 4, k);
    times[3][k] = mysecond() - times[3][k];
    }
    /* --- SUMMARY --- */
    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
    {
    for (j=0; j<4; j++)
        {
        avgtime[j] = avgtime[j] + times[j][k];
        mintime[j] = MYMIN(mintime[j], times[j][k]);
        maxtime[j] = MYMAX(maxtime[j], times[j][k]);
        }
    }
    printf("Function      Rate (MB/s)   Avg time     Min time     Max time\n");
    for (j=0; j<4; j++) {
    avgtime[j] = avgtime[j]/(double)(NTIMES-1);
    printf("%s%11.4f  %11.4f  %11.4f  %11.4f\n", label[j],
           1.0E-06 * bytes[j]/mintime[j],
           avgtime[j],
           mintime[j],
           maxtime[j]);
    }
    printf(HLINE);
    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);
    return 0;
}
# define M 20
/*
 * Estimate the timer granularity: collect M strictly increasing clock
 * samples and return the smallest positive gap between consecutive samples,
 * expressed in microseconds.
 */
int
checktick()
{
    double elapsed[M];
    double first, second;
    int idx, best, gap;

    /* Collect a sequence of M unique time values from the system. */
    for (idx = 0; idx < M; idx++) {
        first = mysecond();
        do {
            second = mysecond();
        } while ((second - first) < 1.0E-6);
        elapsed[idx] = second;
    }

    /*
     * The minimum difference between these M values is our estimate (in
     * microseconds) of the clock granularity.
     */
    best = 1000000;
    for (idx = 1; idx < M; idx++) {
        gap = (int)(1.0E6 * (elapsed[idx] - elapsed[idx-1]));
        best = MYMIN(best, MYMAX(gap, 0));
    }
    return(best);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
/*
 * Wall-clock time in seconds (microsecond resolution) via gettimeofday().
 * The former unused `int i' that captured the ignored return value has been
 * removed; the return status is explicitly discarded instead.
 */
double mysecond()
{
        struct timeval tp;
        struct timezone tzp;

        (void) gettimeofday(&tp,&tzp);
        return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
/*
 * Validate the a/b/c arrays against the values the timing loop should have
 * produced.  All arithmetic is on long long ints and therefore exact, so
 * validation requires exact equality.  The former check computed
 * abs(expected-observed)/observed > epsilon with epsilon == 0: integer
 * division truncated any error smaller than the observed sum to zero, so
 * the check could never fail (and divided by zero when the sum was zero).
 */
void checkSTREAMresults ()
{
	long long int aj,bj,cj,scalar;
	long long int asum,bsum,csum;
	int j,k;

	/* reproduce initialization */
	aj = 1;
	bj = 2;
	cj = 0;
	/* a[] is modified during timing check */
	aj = 2 * aj;
	/* now execute timing loop */
	scalar = 3;
	for (k=0; k<NTIMES; k++)
	{
		cj = aj;
		bj = scalar*cj;
		cj = aj+bj;
		aj = bj+scalar*cj;
	}
	aj = aj * (long long int) (NN);
	bj = bj * (long long int) (NN);
	cj = cj * (long long int) (NN);
	asum = 0;
	bsum = 0;
	csum = 0;
	for (j=0; j<NN; j++) {
		asum += a[j];
		bsum += b[j];
		csum += c[j];
	}
#ifdef VERBOSE
	printf ("Results Comparison: \n");
	printf (" Expected : %lld %lld %lld \n",aj,bj,cj);
	printf (" Observed : %lld %lld %lld \n",asum,bsum,csum);
#endif
	if (aj != asum) {
		printf ("Failed Validation on array a[]\n");
		printf (" Expected : %lld \n",aj);
		printf (" Observed : %lld \n",asum);
	}
	else if (bj != bsum) {
		printf ("Failed Validation on array b[]\n");
		printf (" Expected : %lld \n",bj);
		printf (" Observed : %lld \n",bsum);
	}
	else if (cj != csum) {
		printf ("Failed Validation on array c[]\n");
		printf (" Expected : %lld \n",cj);
		printf (" Observed : %lld \n",csum);
	}
	else {
		printf ("Solution Validates\n");
	}
}
#if 0
/*
 * Hand-"tuned" STREAM kernel variants (plain host OpenMP, static,1
 * schedule).  They would be selected via the TUNED macro, but this whole
 * block is currently compiled out.
 */
void tuned_STREAM_Copy()
{
	int j;
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<NN; j++)
            c[j] = a[j];
}
void tuned_STREAM_Scale(long long int scalar)
{
	int j;
#pragma omp parallel for schedule(static, 1)
	for (j=0; j<NN; j++)
	    b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
	int j;
#pragma omp parallel for schedule(static, 1)
	for (j=0; j<NN; j++)
	    c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(long long int scalar)
{
	int j;
#pragma omp parallel for schedule(static, 1)
	for (j=0; j<NN; j++)
	    a[j] = b[j]+scalar*c[j];
}
#endif
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/xml-tree.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/*
  A threshold/dither map, typically parsed from thresholds.xml: a
  width x height matrix of integer levels interpreted relative to
  `divisor' (see the <levels> element in the XML).
*/
struct _ThresholdMap
{
  char
    *map_id,        /* value of the XML "map" attribute, e.g. "checks" */
    *description;   /* human-readable description from the XML */
  size_t
    width,          /* matrix width in elements */
    height;         /* matrix height in elements */
  ssize_t
    divisor,        /* scale for the level values */
    *levels;        /* the width*height level entries */
};
/*
Static declarations.
*/
/*
  Built-in fallback threshold maps used when thresholds.xml is unavailable.
  NOTE(review): the "checks" entry is described/aliased as 2x1 but declares
  a 2x2 levels matrix -- this matches the data as shipped; confirm before
  "fixing" either side.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    "  <threshold map=\"threshold\" alias=\"1x1\">"
    "    <description>Threshold 1x1 (non-dither)</description>"
    "    <levels width=\"1\" height=\"1\" divisor=\"2\">"
    "        1"
    "    </levels>"
    "  </threshold>"
    "  <threshold map=\"checks\" alias=\"2x1\">"
    "    <description>Checkerboard 2x1 (dither)</description>"
    "    <levels width=\"2\" height=\"2\" divisor=\"3\">"
    "       1 2"
    "       2 1"
    "    </levels>"
    "  </threshold>"
    "</thresholds>";
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,
% const size_t width,const size_t height,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o offset: the mean offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const ssize_t offset,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    number_pixels;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result starts as a clone of the source; its pixels are overwritten
    in place below.
  */
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&threshold_image->exception);
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Local adaptive threshold.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  /*
    NOTE(review): if width or height is 0 this product is 0 and the mean
    computations below divide by zero -- confirm callers validate the
    neighborhood size.
  */
  number_pixels=(MagickRealType) (width*height);
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      channel_bias,     /* window column that falls out on the next slide */
      channel_sum;      /* running per-channel sum of the current window */

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict threshold_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      p addresses a virtual region wide/tall enough to center a width x
      height window on every pixel of row y; q addresses the output row.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      height/2L,image->columns+width,height,exception);
    q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
    /*
      Seed channel_sum with the full window anchored at x == 0; seed
      channel_bias with that window's rightmost column.
    */
    channel_bias=zero;
    channel_sum=zero;
    r=p;
    for (v=0; v < (ssize_t) height; v++)
    {
      for (u=0; u < (ssize_t) width; u++)
      {
        if (u == (ssize_t) (width-1))
          {
            channel_bias.red+=r[u].red;
            channel_bias.green+=r[u].green;
            channel_bias.blue+=r[u].blue;
            channel_bias.opacity+=r[u].opacity;
            if (image->colorspace == CMYKColorspace)
              channel_bias.index=(MagickRealType)
                GetPixelIndex(indexes+(r-p)+u);
          }
        channel_sum.red+=r[u].red;
        channel_sum.green+=r[u].green;
        channel_sum.blue+=r[u].blue;
        channel_sum.opacity+=r[u].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u);
      }
      r+=image->columns+width;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        mean;

      mean=zero;
      r=p;
      /*
        Slide the window one pixel right: subtract the cached column,
        then re-cache the window's leftmost column (it leaves next step)
        and add the incoming rightmost column, row by row.
      */
      channel_sum.red-=channel_bias.red;
      channel_sum.green-=channel_bias.green;
      channel_sum.blue-=channel_bias.blue;
      channel_sum.opacity-=channel_bias.opacity;
      channel_sum.index-=channel_bias.index;
      channel_bias=zero;
      for (v=0; v < (ssize_t) height; v++)
      {
        channel_bias.red+=r[0].red;
        channel_bias.green+=r[0].green;
        channel_bias.blue+=r[0].blue;
        channel_bias.opacity+=r[0].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0);
        channel_sum.red+=r[width-1].red;
        channel_sum.green+=r[width-1].green;
        channel_sum.blue+=r[width-1].blue;
        channel_sum.opacity+=r[width-1].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
            width-1);
        r+=image->columns+width;
      }
      /*
        The local threshold is the window mean plus the caller's offset.
      */
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      /*
        Channels at or below the local mean become 0; others QuantumRange.
      */
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically selects a threshold and replaces each
% pixel in the image with a black pixel if the image intensity is less than
% the selected threshold otherwise white.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity 255

  double
    *cdf,
    *entropy_black,
    *entropy_white,
    max_entropy,
    tiny;

  register ssize_t
    t,
    u;

  size_t
    split;

  /*
    Kapur's maximum-entropy threshold: choose the histogram bin that
    maximizes the summed entropies of the populations at or below and
    above it.  Returns the threshold as a percentage, or -1.0 on memory
    allocation failure.
  */
  cdf=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*cdf));
  entropy_black=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*entropy_black));
  entropy_white=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*entropy_white));
  if ((cdf == (double *) NULL) || (entropy_black == (double *) NULL) ||
      (entropy_white == (double *) NULL))
    {
      if (entropy_white != (double *) NULL)
        entropy_white=(double *) RelinquishMagickMemory(entropy_white);
      if (entropy_black != (double *) NULL)
        entropy_black=(double *) RelinquishMagickMemory(entropy_black);
      if (cdf != (double *) NULL)
        cdf=(double *) RelinquishMagickMemory(cdf);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Cumulative distribution of the (normalized) histogram.
  */
  cdf[0]=histogram[0];
  for (t=1; t <= MaxIntensity; t++)
    cdf[t]=cdf[t-1]+histogram[t];
  tiny=MagickMinimumValue;
  for (t=0; t <= MaxIntensity; t++)
  {
    double
      sum;

    /*
      Entropy of the bins at or below candidate threshold t.
    */
    entropy_black[t]=0.0;
    if (cdf[t] > tiny)
      {
        sum=0.0;
        for (u=0; u <= t; u++)
          if (histogram[u] > tiny)
            sum-=histogram[u]/cdf[t]*log(histogram[u]/cdf[t]);
        entropy_black[t]=sum;
      }
    /*
      Entropy of the bins above candidate threshold t.
    */
    entropy_white[t]=0.0;
    if ((1.0-cdf[t]) > tiny)
      {
        sum=0.0;
        for (u=t+1; u <= MaxIntensity; u++)
          if (histogram[u] > tiny)
            sum-=histogram[u]/(1.0-cdf[t])*log(histogram[u]/(1.0-cdf[t]));
        entropy_white[t]=sum;
      }
  }
  /*
    The best split maximizes the combined entropy.
  */
  max_entropy=entropy_black[0]+entropy_white[0];
  split=0;
  for (t=1; t <= MaxIntensity; t++)
    if ((entropy_black[t]+entropy_white[t]) > max_entropy)
      {
        max_entropy=entropy_black[t]+entropy_white[t];
        split=(size_t) t;
      }
  entropy_white=(double *) RelinquishMagickMemory(entropy_white);
  entropy_black=(double *) RelinquishMagickMemory(entropy_black);
  cdf=(double *) RelinquishMagickMemory(cdf);
  return(100.0*split/MaxIntensity);
}
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    myu,
    omega,
    threshold,
    total_myu;

  register ssize_t
    i;

  /*
    Otsu's method: choose the threshold maximizing the inter-class
    variance sigma(i)=(myu_T*omega(i)-myu(i))^2/(omega(i)*(1-omega(i))),
    where omega(i) and myu(i) are the zeroth and first cumulative moments
    of the normalized histogram (MaxIntensity+1 bins) and myu_T is the
    total first moment.  The cumulative moments are carried as running
    scalars in the same accumulation order as the former prefix arrays,
    so results are bit-identical while the four scratch allocations (and
    their failure path) disappear.  Returns the threshold as a percentage
    of MaxIntensity.
  */
  (void) image;
  (void) exception;
  total_myu=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
    total_myu+=i*histogram[i];
  omega=histogram[0];  /* omega(0) */
  myu=0.0;             /* myu(0) */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    double
      delta,
      sigma;

    sigma=0.0;
    if ((omega != 0.0) && (omega != 1.0))
      {
        delta=total_myu*omega-myu;
        sigma=delta*delta/(omega*(1.0-omega));
      }
    if (sigma > max_sigma)
      {
        max_sigma=sigma;
        threshold=(double) i;
      }
    /*
      Advance the running moments to index i+1.
    */
    omega+=histogram[i+1];
    myu+=(i+1)*histogram[i+1];
  }
  return(100.0*threshold/MaxIntensity);
}
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Triangle (Zack) algorithm: draw a line from the histogram peak to the
    far end of the populated range and pick the bin whose perpendicular
    distance from that line is largest.  Returns the threshold as a
    percentage of MaxIntensity.
  */
  (void) image;
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Line from the peak (x1,y1) toward the longer tail (x2,0).
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=fabs(segment);  /* was sqrt(segment*segment): same value, fewer roundings */
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=fabs(segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form the (MaxIntensity+1)-bin intensity histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /*
          Previously a bare break: the method would then threshold from a
          partial histogram.  Record the failure and bail out instead.
        */
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      histogram=(double *) RelinquishMagickMemory(histogram);
      return(MagickFalse);
    }
  /*
    Normalize histogram to a probability distribution.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram; each method returns a percentage,
    or a negative value on failure.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image, recording the chosen value as an image property.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property);
  return(BilevelImage(image,QuantumRange*threshold/100.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than the given value is set to its maximum, or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImageChannel method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold)
% MagickBooleanType BilevelImageChannel(Image *image,
% const ChannelType channel,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: define the threshold values.
%
% Aside: You can get the same results as operator using LevelImageChannels()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  /*
    Threshold every default channel of the image against the given value.
  */
  return(BilevelImageChannel(image,DefaultChannels,threshold));
}
MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Gray images are promoted to sRGB before per-channel thresholding.
  */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image: channels at or below the threshold become 0,
    channels above it become QuantumRange.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /*
          Synced channels: threshold the gray intensity once and copy the
          result to red, green and blue.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /*
        Independent channels: threshold each selected channel against the
        same value.
      */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            /*
              Matte images express the result as opaque/transparent rather
              than raw 0/QuantumRange values.
            */
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
% MagickBooleanType BlackThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Convenience wrapper: black-threshold all default channels, reporting
    errors through the image's own exception structure.
  */
  return(BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL threshold string is a no-op, not an error.
  */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the threshold geometry string; channels not given explicitly
    default to the first (red) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  /*
    A '%' suffix scales the values from percentages to quantum units.
  */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    A non-gray threshold cannot be applied in a gray colorspace; promote
    the image to sRGB first.
  */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image: selected channels strictly below their
    threshold are forced to 0; values at or above it are left unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() set each pixel whose value is below zero to zero and any the
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImageChannel method is:
%
% MagickBooleanType ClampImage(Image *image)
% MagickBooleanType ClampImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  /*
    Clamp every default channel of the image to the quantum range.
  */
  return(ClampImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /*
        Palette images: clamping the colormap entries is sufficient; the
        pixel indexes are untouched.  NOTE(review): the channel mask is
        ignored on this path -- confirm that is intended.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image: per-pixel clamp of each selected channel.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the map's owned members, then the map itself; always returns
    NULL so callers can write map=DestroyThresholdMap(map).
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the GetThresholdMapFile method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Search the <threshold> elements for one whose "map" or "alias"
    attribute matches map_id.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    A matching map must carry both a <description> and a <levels> child.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a Threshold Map to return
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Width, height and divisor are all required; width and height must be
    non-zero and the divisor at least 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array; each value must lie in
      [0, divisor].
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /*
      One more successful parse would mean the level count disagrees with
      width*height; value is read only to detect that surplus.
    */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for
% a map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  /*
    Check the built-in map list first, then fall back to each configured
    thresholds XML file until a match is found.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
  options=GetConfigureOptions(ThresholdsFilename,exception);
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
    if (map != (ThresholdMap *) NULL)
      break;
  }
  options=DestroyConfigureOptions(options);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  /*
    Parse the XML data and print one "Map / Alias / Description" row per
    <threshold> element; any malformed element aborts the listing.
  */
  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  while (threshold != (XMLTreeInfo *) NULL)
  {
    map=GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold, "alias");  /* optional attribute */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
    threshold=GetNextXMLTreeTag(threshold);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List every configured threshold-map file to the requested stream
    (stdout when no stream is given), AND-ing the per-file results.
  */
  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() uses the ordered dithering technique of reducing color
% images to monochrome using positional information to retain as much
% information as possible.
%
% WARNING: This function is deprecated, and is now just a call to
% the more powerful OrderedPosterizeImage() function.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image)
% MagickBooleanType OrderedDitherImageChannel(Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  /*
    Deprecated shim: ordered-dither all default channels via the channel
    variant, reporting errors through the image's own exception.
  */
  return(OrderedDitherImageChannel(image,DefaultChannels,&image->exception));
}
MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  /*
    Deprecated shim: delegate to the more general
    OrderedPosterizeImageChannel() with the classic 8x8 ordered-dither map.
  */
  return(OrderedPosterizeImageChannel(image,channel,"o8x8",exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedPosterizeImage() will perform a ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedPosterizeImage method is:
%
% MagickBooleanType OrderedPosterizeImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
% MagickBooleanType OrderedPosterizeImageChannel(Image *image,
% const ChannelType channel,const char *threshold_map,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: posterize every default channel with the named
    threshold map.
  */
  return(OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception));
}
MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Posterize the selected channels with an ordered dither: threshold_map is
    "<map-name>[,levels[,levels...]]"; the named map supplies the per-pixel
    dither thresholds and the optional numbers give per-channel level counts.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    char
      token[MaxTextExtent];

    register const char
      *p;

    /*
      Skip leading whitespace/commas, then copy the map name (up to the next
      whitespace, comma, or NUL) into token[] and look it up.
    */
    p=(char *)threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
           (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
           (*p != '\0')) {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map] = *p;
      p++;
    }
    token[p-threshold_map] = '\0';
    map = GetThresholdMap(token, exception);
    if ( map == (ThresholdMap *) NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  }
  /*
    Set channel levels from extra comma separated arguments.  Default to 2,
    the single value given, or individual per-channel values.
  */
  { /* parse directly as a comma separated list of integers */
    char *p;

    p = strchr((char *) threshold_map,',');
    if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned int) strtoul(p, &p, 10);
    else
      levels.index = 2;
    levels.red = ((channel & RedChannel ) != 0) ? levels.index : 0;
    levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0;
    levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index = ((channel & IndexChannel) != 0
      && (image->colorspace == CMYKColorspace)) ? levels.index : 0;
    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' ) {
      p=strchr((char *) threshold_map,',');
      p++;
      if ((channel & RedChannel) != 0)
        levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & GreenChannel) != 0)
        levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & BlueChannel) != 0)
        levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace)
        levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & OpacityChannel) != 0)
        levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
    }
  }
  { /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of pseudo-level divisions added between color levels */
    d = map->divisor-1;
    /* reduce levels to levels - 1 */
    levels.red = levels.red ? levels.red-1 : 0;
    levels.green = levels.green ? levels.green-1 : 0;
    levels.blue = levels.blue ? levels.blue-1 : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index = levels.index ? levels.index-1 : 0;
    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        /* FIX: release the threshold map; the original leaked it here. */
        map=DestroyThresholdMap(map);
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel.
          This must be an integer from 1 to map->divisor-1.
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];
        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this color's pseudo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red) {
          t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
          l = t/d;  t = t-l*d;
          SetPixelRed(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red)));
        }
        if (levels.green) {
          t = (ssize_t) (QuantumScale*GetPixelGreen(q)*
            (levels.green*d+1));
          l = t/d;  t = t-l*d;
          SetPixelGreen(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green)));
        }
        if (levels.blue) {
          t = (ssize_t) (QuantumScale*GetPixelBlue(q)*
            (levels.blue*d+1));
          l = t/d;  t = t-l*d;
          SetPixelBlue(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue)));
        }
        if (levels.opacity) {
          t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
            (levels.opacity*d+1));
          l = t/d;  t = t-l*d;
          SetPixelOpacity(q,ClampToQuantum((MagickRealType)
            ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
            levels.opacity)));
        }
        if (levels.index) {
          t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
            (levels.index*d+1));
          l = t/d;  t = t-l*d;
          SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
            (t>=threshold))*(MagickRealType) QuantumRange/levels.index)));
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_OrderedPosterizeImageChannel)
#endif
          proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /* FIX: propagate pixel-cache failures; the original returned MagickTrue. */
  return(status != MagickFalse ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImageChannel method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
% MagickBooleanType PerceptibleImageChannel(Image *image,
% const ChannelType channel,const double epsilon)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  /*
    Push near-zero quantum values out to +/- epsilon (matching the sign of
    the input); values whose magnitude is already >= epsilon pass through.
  */
  sign=1.0;
  if ((double) quantum < 0.0)
    sign=(-1.0);
  if ((sign*quantum) < epsilon)
    return((Quantum) (sign*epsilon));
  return(quantum);
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  /*
    Convenience wrapper: apply the perceptible threshold to every default
    channel.
  */
  return(PerceptibleImageChannel(image,DefaultChannels,epsilon));
}
MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    For a palette (PseudoClass) image it suffices to clamp the colormap
    entries and re-sync the pixels from the map.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image: clamp each selected channel of every pixel, one row
    per (possibly parallel) iteration.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* A failure in another row aborts the remaining iterations. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImageChannel)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const char *thresholds,ExceptionInfo *exception)
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const ChannelType channel,const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing low,high thresholds. If the
% string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
% is performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: random-threshold every default channel.
  */
  return(RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception));
}
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  /*
    Parse "low[,high][%]" from the geometry string; a missing high value
    collapses the range to low (the defaults below are overwritten by rho
    and sigma unconditionally).
  */
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* Percent form: scale both bounds into the quantum range. */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
          Small integer arguments (e.g. "2x2", "3x3") are treated as an
          ordered dither request rather than a random threshold.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /*
        All-channel case: threshold on pixel intensity and produce a
        two-entry colormapped (bilevel) result.
      */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /*
            Intensities below/above the range are compared against the
            clamped bound; in-range intensities get a random threshold.
          */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else if (intensity > max_threshold)
            threshold.index=max_threshold;
          else
            threshold.index=(MagickRealType)(QuantumRange*
              GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /*
    Per-channel case: each selected channel is thresholded independently
    against its own (possibly random) threshold.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* First pick each channel's threshold... */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* ...then snap the channel to black or white around it. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ?
          0 : QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ?
          0 : QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
% MagickBooleanType WhiteThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Convenience wrapper: white-threshold every default channel, reporting
    errors through the image's own exception.
  */
  return(WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse per-channel thresholds from the geometry string; any channel not
    explicitly given defaults to the first (red/rho) value.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  /* A '%' suffix scales every threshold from percent into quantum range. */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    A non-gray threshold on a gray image requires promoting the image to
    sRGB so channels can diverge.
  */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image: force each selected channel above its threshold
    to full white, leaving other pixels untouched.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
clauses-2.c | struct S { int r; int *s; int t[10]; };
void bar (int *);
/* Compile-time diagnostics test for OpenMP "target" clause checking.
   Each pragma below combines map and data-sharing clauses in a way the
   standard forbids; the dg-error annotation on the line names the exact
   diagnostic GCC must emit.  The two pragmas without annotations map
   disjoint members of the same struct, which is valid.
   NOTE(review): do not insert or remove lines between the final two
   statements -- the trailing dg-error uses a relative line reference (.-1). */
void
foo (int *p, int q, struct S t, int i, int j, int k, int l)
{
  #pragma omp target map (q), firstprivate (q)	/* { dg-error "appears both in data and map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) firstprivate (p)	/* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target firstprivate (p), map (p[0])	/* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (p[0]) map (p)	/* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (p) , map (p[0])	/* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (q) map (q)	/* { dg-error "appears more than once in map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) map (p[0])	/* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (t) map (t.r)	/* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t)	/* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t.r)	/* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target firstprivate (t), map (t.r)	/* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) firstprivate (t)	/* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.s[0]) map (t)	/* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map(t.s[0])	/* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target firstprivate (t) map (t.s[0])	/* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) firstprivate (t)	/* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) map (t.s[2])	/* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t.t[0:2]) map (t.t[4:6])	/* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.t[i:j]) map (t.t[k:l])	/* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.s[0]) map (t.r)
  bar (t.s);
  #pragma omp target map (t.r) ,map (t.s[0])
  bar (t.s);
  #pragma omp target map (t.r) map (t) map (t.s[0]) firstprivate (t)	/* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map (t.r) firstprivate (t) map (t.s[0])	/* { dg-error "appears both in data and map clauses" } */
  bar (t.s);	/* { dg-error "appears more than once in map clauses" "" { target *-*-* } .-1 } */
}
|
GB_unop__identity_int16_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fc32)
// op(A') function: GB (_unop_tran__identity_int16_fc32)
// C type: int16_t
// A type: GxB_FC32_t
// cast: int16_t cij = GB_cast_to_int16_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator with a typecast
// from GxB_FC32_t to int16_t (real part only, via the GB_CAST_OP macro
// defined above).  Cx and Ax may be aliased.  The work is split statically
// across `nthreads` OpenMP threads.
GrB_Info GB (_unop_apply__identity_int16_fc32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out by GxB_NO_* flags; caller uses the generic method
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry in Ax [0..anz-1] is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = op (cast (Ax [p]))
            GB_CAST_OP (p, p) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GB_CAST_OP (p, p) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// All of the work happens in the shared transpose template, which is
// specialized for this operator/type pair by the GB_* macros defined above
// (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__identity_int16_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out by GxB_NO_* flags; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mypaint-tiled-surface.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <config.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-config.h"
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
/* Default MyPaintSurface::begin_atomic vfunc: simply forwards to the
 * tiled-surface implementation. */
static void
begin_atomic_default(MyPaintSurface *surface)
{
    MyPaintTiledSurface *tiled_surface = (MyPaintTiledSurface *)surface;
    mypaint_tiled_surface_begin_atomic(tiled_surface);
}
/* Default MyPaintSurface::end_atomic vfunc: simply forwards to the
 * tiled-surface implementation. */
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
    MyPaintTiledSurface *tiled_surface = (MyPaintTiledSurface *)surface;
    mypaint_tiled_surface_end_atomic(tiled_surface, roi);
}
/**
 * mypaint_tiled_surface_begin_atomic: (skip)
 *
 * Implementation of #MyPaintSurface::begin_atomic vfunc.
 * Resets the dirty bounding box; it is grown again as dab operations are
 * queued, and reported back to the caller by the matching end_atomic call.
 *
 * Note: Only intended to be used from #MyPaintTiledSurface subclasses,
 * which should chain up to this if implementing their own
 * #MyPaintSurface::begin_atomic vfunc.  Application code should only use
 * mypaint_surface_begin_atomic().
 */
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;
}
/**
 * mypaint_tiled_surface_end_atomic: (skip)
 *
 * Implementation of #MyPaintSurface::end_atomic vfunc.
 * Flushes all queued dab operations tile by tile (in parallel when the
 * backend advertises threadsafe tile requests) and reports the dirty
 * bounding box accumulated since begin_atomic through @roi.
 *
 * Note: Only intended to be used from #MyPaintTiledSurface subclasses,
 * which should chain up to this if implementing their own
 * #MyPaintSurface::end_atomic vfunc.  Application code should only use
 * mypaint_surface_end_atomic().
 */
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
    TileIndex *dirty_tiles;
    const int dirty_count =
        operation_queue_get_dirty_tiles(self->operation_queue, &dirty_tiles);

    #pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && dirty_count > 3)
    for (int idx = 0; idx < dirty_count; idx++) {
        process_tile(self, dirty_tiles[idx].x, dirty_tiles[idx].y);
    }

    operation_queue_clear_dirty_tiles(self->operation_queue);

    if (roi != NULL) {
        *roi = self->dirty_bbox;
    }
}
/**
 * mypaint_tiled_surface_tile_request_start:
 *
 * Fetch a tile out from the underlying tile store.
 * On success, request->data will be set to point to the fetched tile.
 * Consumers must *always* call mypaint_tiled_surface_tile_request_end()
 * with the same request to complete the transaction.
 */
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    MyPaintTileRequestStartFunction fetch = self->tile_request_start;
    assert(fetch); /* backend must have been wired up in _init() */
    fetch(self, request);
}
/**
 * mypaint_tiled_surface_tile_request_end:
 *
 * Put a (potentially modified) tile back into the underlying tile store.
 *
 * Consumers must *always* call mypaint_tiled_surface_tile_request_start()
 * with the same request to start the transaction before calling this.
 */
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    MyPaintTileRequestEndFunction release = self->tile_request_end;
    assert(release); /* backend must have been wired up in _init() */
    release(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
 * mypaint_tiled_surface_set_symmetry_state:
 * @active: TRUE to enable, FALSE to disable.
 * @center_x: X axis to mirror events across.
 *
 * Enable/Disable symmetric brush painting across a vertical axis at
 * @center_x.  See draw_dab() for how the mirrored dab is generated.
 */
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active, float center_x)
{
    self->surface_center_x = center_x;
    self->surface_do_symmetry = active;
}
/**
 * mypaint_tile_request_init:
 *
 * Initialize a request for use with mypaint_tiled_surface_tile_request_start()
 * and mypaint_tiled_surface_tile_request_end().  The buffer and context
 * fields are cleared; the backend fills them in during the transaction.
 */
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
                          int tx, int ty, gboolean readonly)
{
    data->mipmap_level = level;
    data->tx = tx;
    data->ty = ty;
    data->readonly = readonly;
    data->buffer = NULL;
    data->context = NULL;
#ifdef _OPENMP
    /* record which worker issued the request, for per-thread backends */
    data->thread_id = omp_get_thread_num();
#else
    data->thread_id = -1;
#endif
}
// Must be threadsafe
//
// Squared elliptical distance of point (x, y) from the dab center, measured
// in the dab's rotated frame.  sn/cs are the sine/cosine of the dab angle;
// aspect_ratio stretches the minor axis.
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
                   float sn, float cs)
{
    const float rotated_y = (y*cs - x*sn) * aspect_ratio;
    const float rotated_x = y*sn + x*cs;
    return rotated_y*rotated_y + rotated_x*rotated_x;
}
// Squared distance of pixel (xp, yp)'s center from the dab center (x, y),
// normalized by the dab radius: 0 at the center, 1 at the fringe.
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
             float sn, float cs, float one_over_radius2)
{
    // code duplication, see brush::count_dabs_to()
    const float dy = (yp + 0.5f - y);
    const float dx = (xp + 0.5f - x);
    const float ry = (dy*cs - dx*sn) * aspect_ratio;
    const float rx = dy*sn + dx*cs;
    // result is in range 0.0 .. 1.0*sqrt(2)
    return (ry*ry + rx*rx) * one_over_radius2;
}
// Signed-area style test: the sign of the result tells which side of the
// dab's line the point (px, py) lies on (see calculate_rr_antialiased).
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
    const float lhs = -(px - vx) * vy;
    const float rhs = vx * (py - vy);
    return lhs - rhs;
}
// Orthogonal projection of point (px, py) onto the line through the origin
// with direction (lx, ly): O = L * (P.L)/(L.L).  Result in (*ox, *oy).
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
    const float t = (px*lx + py*ly) / (lx*lx + ly*ly);
    *ox = lx * t;
    *oy = ly * t;
}
// Must be threadsafe
//
// Antialiased version of calculate_rr(), used for small dabs (< 3 px).
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much occluded is the farthest point relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
// project the pixel center onto the dab's major axis, then clamp the
// result back into the pixel rectangle
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
// would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
// attenuate the nearest point's visibility by the occlusion delta and
// convert back to the rr convention (0 = opaque center, 1 = fringe)
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
// Map normalized squared distance rr (0 = dab center, 1 = fringe) to dab
// opacity using the two pre-computed linear segments; the segment boundary
// sits at rr == hardness.  See render_dab_mask() for the segment setup.
static inline float
calculate_opa(float rr, float hardness,
              float segment1_offset, float segment1_slope,
              float segment2_offset, float segment2_slope) {
    float opa;
    if (rr <= hardness) {
        opa = segment1_offset + rr*segment1_slope;
    } else {
        opa = segment2_offset + rr*segment2_slope;
    }
    // beyond the dab fringe: fully transparent
    if (rr > 1.0f) {
        opa = 0.0f;
    }

#ifdef HEAVY_DEBUG
    assert(isfinite(opa));
    assert(opa >= 0.0f && opa <= 1.0f);
#endif
    return opa;
}
// Must be threadsafe
//
// Rasterize one dab's opacity mask into `mask`, in tile-local coordinates.
// (x, y) is the dab center relative to the tile origin; radius/hardness/
// aspect_ratio/angle describe the dab shape.  The output is run-length
// encoded: a zero opacity value is followed by a skip count, and the mask
// is terminated by a 0,0 pair (see the RLE comment below).
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
// Bounding box of the dab, clamped to the tile.
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
// This an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
// Small dabs get the antialiased distance computation; larger dabs don't
// need it (their edge spans enough pixels already).
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// we do run length encoding: if opacity is zero, the next
// value in the mask is the number of pixels that can be skipped.
uint16_t * mask_p = mask;
int skip=0;
// account for the rows above the bounding box
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
// opacity in 15-bit fixed point; 0 means "transparent, skip"
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
// NOTE(review): skip is stored scaled by 4 -- presumably the
// consumers in brushmodes.c advance 4 uint16 channels per
// pixel; confirm against draw_dab_pixels_* before changing.
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
// account for the rest of the row past the bounding box
skip += MYPAINT_TILE_SIZE-xp;
}
// terminator: zero opacity with zero skip marks the end of the mask
*mask_p++ = 0;
*mask_p++ = 0;
}
// Must be threadsafe
//
// Apply a single queued dab operation to one tile's pixel buffer.
// `mask` is scratch space for the rasterized opacity mask; (tx, ty) is the
// tile index used to translate the dab into tile-local coordinates.
void
process_op(uint16_t *rgba_p, uint16_t *mask,
           int tx, int ty, OperationDataDrawDab *op)
{
    // Step 1: rasterize the dab's opacity mask for this tile.
    render_dab_mask(mask,
                    op->x - tx*MYPAINT_TILE_SIZE,
                    op->y - ty*MYPAINT_TILE_SIZE,
                    op->radius,
                    op->hardness,
                    op->aspect_ratio, op->angle);

    // Step 2: stamp one dab per enabled blend mode, weighted by op->opaque.
    if (op->normal) {
        if (op->color_a == 1.0) {
            // Fully opaque paint.
            draw_dab_pixels_BlendMode_Normal(
                mask, rgba_p,
                op->color_r, op->color_g, op->color_b,
                op->normal*op->opaque*(1<<15));
        }
        else {
            // Normal case for brushes that use smudging (eg. watercolor).
            draw_dab_pixels_BlendMode_Normal_and_Eraser(
                mask, rgba_p,
                op->color_r, op->color_g, op->color_b,
                op->color_a*(1<<15), op->normal*op->opaque*(1<<15));
        }
    }

    if (op->lock_alpha) {
        draw_dab_pixels_BlendMode_LockAlpha(
            mask, rgba_p,
            op->color_r, op->color_g, op->color_b,
            op->lock_alpha*op->opaque*(1<<15));
    }

    if (op->colorize) {
        draw_dab_pixels_BlendMode_Color(
            mask, rgba_p,
            op->color_r, op->color_g, op->color_b,
            op->colorize*op->opaque*(1<<15));
    }
}
// Must be threadsafe
//
// Drain and apply all queued dab operations for tile (tx, ty).
// Fetches the tile buffer once, applies every queued operation to it, then
// releases the tile.  Does nothing if no operations are queued.
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
    TileIndex tile_index = {tx, ty};
    OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
    if (!op) {
        return;
    }

    MyPaintTileRequest request_data;
    const int mipmap_level = 0;
    mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);

    mypaint_tiled_surface_tile_request_start(self, &request_data);
    uint16_t * rgba_p = request_data.buffer;
    if (!rgba_p) {
        printf("Warning: Unable to get tile!\n");
        // FIX: the popped operation was previously leaked on this path.
        free(op);
        // Complete the transaction: the tile_request_start/end contract
        // documented above requires end to always be called with the same
        // request once start has been issued.
        mypaint_tiled_surface_tile_request_end(self, &request_data);
        return;
    }

    // Scratch mask reused for every operation on this tile.
    uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];

    while (op) {
        process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
        free(op);
        op = operation_queue_pop(self->operation_queue, tile_index);
    }
    mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
//
// Grow the surface's dirty bounding box to cover the dab described by `op`.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
    const float margin = op->radius + 1.0f; // +1.0 should not be required, only to be sure
    const int left   = floor (op->x - margin);
    const int top    = floor (op->y - margin);
    const int right  = floor (op->x + margin);
    const int bottom = floor (op->y + margin);
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, left, top);
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, right, bottom);
}
// returns TRUE if the surface was modified
//
// Validate and clamp the dab parameters, then queue a copy of the
// operation for every tile the dab touches.  The actual pixel work happens
// later in process_tile().  Returns FALSE for dabs that cannot change any
// pixel (tiny radius, zero hardness, zero opacity).
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
                            float radius,
                            float color_r, float color_g, float color_b,
                            float opaque, float hardness,
                            float color_a,
                            float aspect_ratio, float angle,
                            float lock_alpha,
                            float colorize
                            )
{
    // Build the operation record on the stack; heap copies are queued below.
    OperationDataDrawDab op_struct;
    OperationDataDrawDab *op = &op_struct;

    op->x = x;
    op->y = y;
    op->radius = radius;
    op->aspect_ratio = aspect_ratio;
    op->angle = angle;
    op->opaque = CLAMP(opaque, 0.0f, 1.0f);
    op->hardness = CLAMP(hardness, 0.0f, 1.0f);
    op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
    op->colorize = CLAMP(colorize, 0.0f, 1.0f);

    // Reject dabs that could not change any pixel.
    if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
    if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
    if (op->opaque == 0.0f) return FALSE;

    color_r = CLAMP(color_r, 0.0f, 1.0f);
    color_g = CLAMP(color_g, 0.0f, 1.0f);
    color_b = CLAMP(color_b, 0.0f, 1.0f);
    color_a = CLAMP(color_a, 0.0f, 1.0f);

    // Colors in 15-bit fixed point; alpha stays floating point.
    op->color_r = color_r * (1<<15);
    op->color_g = color_g * (1<<15);
    op->color_b = color_b * (1<<15);
    op->color_a = color_a;

    // blending mode preparation: the "normal" share is whatever is left
    // after lock_alpha and colorize take their cut
    op->normal = 1.0f;
    op->normal *= 1.0f-op->lock_alpha;
    op->normal *= 1.0f-op->colorize;

    if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;

    // Determine the tiles influenced by the operation, and queue it for
    // processing for each tile.
    float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
    int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
    int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
    int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
    int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);

    for (int ty = ty1; ty <= ty2; ty++) {
        for (int tx = tx1; tx <= tx2; tx++) {
            const TileIndex tile_index = {tx, ty};
            OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
            // FIX: guard against allocation failure -- previously *op_copy
            // was written through a possibly-NULL pointer (undefined
            // behavior).  On OOM this tile simply misses the dab.
            if (!op_copy) continue;
            *op_copy = *op;
            operation_queue_add(self->operation_queue, tile_index, op_copy);
        }
    }

    update_dirty_bbox(self, op);

    return TRUE;
}
// returns TRUE if the surface was modified
//
// MyPaintSurface::draw_dab vfunc.  Queues the dab, and -- when symmetry
// painting is enabled -- a second dab mirrored across the vertical axis at
// surface_center_x (with the angle negated to mirror the dab shape too).
int draw_dab (MyPaintSurface *surface, float x, float y,
              float radius,
              float color_r, float color_g, float color_b,
              float opaque, float hardness,
              float color_a,
              float aspect_ratio, float angle,
              float lock_alpha,
              float colorize)
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;

    // Normal pass
    gboolean modified = draw_dab_internal(self, x, y, radius,
                                          color_r, color_g, color_b,
                                          opaque, hardness, color_a,
                                          aspect_ratio, angle,
                                          lock_alpha, colorize);

    // Symmetry pass
    if (self->surface_do_symmetry) {
        const float symm_x = self->surface_center_x + (self->surface_center_x - x);
        if (draw_dab_internal(self, symm_x, y, radius,
                              color_r, color_g, color_b,
                              opaque, hardness, color_a,
                              aspect_ratio, -angle,
                              lock_alpha, colorize)) {
            modified = TRUE;
        }
    }

    return modified;
}
// MyPaintSurface::get_color vfunc.
//
// Sample the average color under a soft dab of the given radius centered at
// (x, y).  Results are written to *color_r/g/b/a, un-premultiplied and
// clamped to [0, 1].  Queued dab operations on the touched tiles are
// flushed first so the sample reflects pending strokes.
void get_color (MyPaintSurface *surface, float x, float y,
                float radius,
                float * color_r, float * color_g, float * color_b, float * color_a
                )
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;

    if (radius < 1.0f) radius = 1.0f;
    const float hardness = 0.5f;
    const float aspect_ratio = 1.0f;
    const float angle = 0.0f;

    float sum_weight, sum_r, sum_g, sum_b, sum_a;
    sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;

    // in case we return with an error
    *color_r = 0.0f;
    *color_g = 1.0f;
    *color_b = 0.0f;

    // WARNING: some code duplication with draw_dab
    float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure

    // Inclusive tile index range covered by the dab.
    int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
    int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
    int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
    int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);

#ifdef _OPENMP
    // FIX: the ranges above are inclusive, so each axis spans (hi - lo + 1)
    // tiles.  The previous (tx2 - tx1) * (ty2 - ty1) undercounted (e.g. 0
    // for a single tile), which wrongly disabled the parallel region below.
    int tiles_n = (tx2 - tx1 + 1) * (ty2 - ty1 + 1);
#endif

#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
    for (int ty = ty1; ty <= ty2; ty++) {
        for (int tx = tx1; tx <= tx2; tx++) {

            // Flush queued draw_dab operations
            process_tile(self, tx, ty);

            MyPaintTileRequest request_data;
            const int mipmap_level = 0;
            mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);

            mypaint_tiled_surface_tile_request_start(self, &request_data);
            uint16_t * rgba_p = request_data.buffer;
            if (!rgba_p) {
                printf("Warning: Unable to get tile!\n");
                break;
            }

            // first, we calculate the mask (opacity for each pixel)
            uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
            render_dab_mask(mask,
                            x - tx*MYPAINT_TILE_SIZE,
                            y - ty*MYPAINT_TILE_SIZE,
                            radius,
                            hardness,
                            aspect_ratio, angle
                            );

            // The accumulators are shared across threads, so serialize.
            // TODO: try atomic operations instead
            #pragma omp critical
            {
                get_color_pixels_accumulate (mask, rgba_p,
                                             &sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
            }

            mypaint_tiled_surface_tile_request_end(self, &request_data);
        }
    }

    // NOTE(review): if every tile fetch failed, sum_weight stays 0 and the
    // divisions below produce non-finite values in release builds.
    assert(sum_weight > 0.0f);
    sum_a /= sum_weight;
    sum_r /= sum_weight;
    sum_g /= sum_weight;
    sum_b /= sum_weight;

    *color_a = sum_a;
    // now un-premultiply the alpha
    if (sum_a > 0.0f) {
        *color_r = sum_r / sum_a;
        *color_g = sum_g / sum_a;
        *color_b = sum_b / sum_a;
    } else {
        // it is all transparent, so don't care about the colors
        // (let's make them ugly so bugs will be visible)
        *color_r = 0.0f;
        *color_g = 1.0f;
        *color_b = 0.0f;
    }

    // fix rounding problems that do happen due to floating point math
    *color_r = CLAMP(*color_r, 0.0f, 1.0f);
    *color_g = CLAMP(*color_g, 0.0f, 1.0f);
    *color_b = CLAMP(*color_b, 0.0f, 1.0f);
    *color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
 * mypaint_tiled_surface_init: (skip)
 *
 * Initialize the surface, passing in implementations of the tile backend.
 * Wires up the MyPaintSurface vfuncs, stores the backend callbacks, and
 * resets all per-surface state (dirty bbox, symmetry, operation queue).
 * Note: Only intended to be called from subclasses of #MyPaintTiledSurface
 **/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
                           MyPaintTileRequestStartFunction tile_request_start,
                           MyPaintTileRequestEndFunction tile_request_end)
{
    mypaint_surface_init(&self->parent);

    /* vfunc table */
    self->parent.draw_dab = draw_dab;
    self->parent.get_color = get_color;
    self->parent.begin_atomic = begin_atomic_default;
    self->parent.end_atomic = end_atomic_default;

    /* tile backend */
    self->tile_request_start = tile_request_start;
    self->tile_request_end = tile_request_end;
    self->tile_size = MYPAINT_TILE_SIZE;
    self->threadsafe_tile_requests = FALSE;

    /* per-surface state */
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;
    self->surface_do_symmetry = FALSE;
    self->surface_center_x = 0.0f;
    self->operation_queue = operation_queue_new();
}
/**
 * mypaint_tiled_surface_destroy: (skip)
 *
 * Deallocate resources set up by mypaint_tiled_surface_init()
 * Does not free the #MyPaintTiledSurface itself.
 * Note: Only intended to be called from subclasses of #MyPaintTiledSurface
 */
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
// Frees the operation queue; any still-queued operations are released by it.
operation_queue_free(self->operation_queue);
}
|
GB_binop__lxor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lxor_int64
// A.*B function (eWiseMult): GB_AemultB__lxor_int64
// A*D function (colscale): GB_AxD__lxor_int64
// D*A function (rowscale): GB_DxB__lxor_int64
// C+=B function (dense accum): GB_Cdense_accumB__lxor_int64
// C+=b function (dense accum): GB_Cdense_accumb__lxor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_int64
// C=scalar+B GB_bind1st__lxor_int64
// C=scalar+B' GB_bind1st_tran__lxor_int64
// C=A+scalar GB_bind2nd__lxor_int64
// C=A'+scalar GB_bind2nd_tran__lxor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT64 || GxB_NO_LXOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the dense C += A+B kernel is only generated
// for accumulable ops.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense.  The loop lives in the shared
// template, specialized for LXOR on int64_t by the GB_* macros above.
GrB_Info GB_Cdense_ewise3_noaccum__lxor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out by GxB_NO_* flags; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// LXOR operator as the accumulator.  The slice arrays describe how B's
// entries are partitioned into `ntasks` parallel tasks.
GrB_Info GB_Cdense_accumB__lxor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out by GxB_NO_* flags; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the LXOR
// operator.  p_bwork points at the scalar, typecast to int64_t below.
GrB_Info GB_Cdense_accumb__lxor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out by GxB_NO_* flags; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): generated code -- the return below is unreachable (the
// braced block above always returns); left as emitted by the generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: C has the pattern of A, scaled by the diagonal
// matrix D, with Cx [p] = op (Ax [p], D(j,j)).
GrB_Info GB_AxD__lxor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Cx is written directly; the template fills it in parallel
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: C has the pattern of B, scaled by the diagonal
// matrix D, with Cx [p] = op (D(i,i), Bx [p]).
GrB_Info GB_DxB__lxor_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, where C contains entries present in
// either A or B.  Slice workspaces are allocated by the template and
// released by GB_FREE_ALL (defined just above this function).
GrB_Info GB_AaddB__lxor_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // per-matrix slice workspaces, freed by GB_FREE_ALL on exit
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where C contains entries present in
// both A and B.  Same workspace discipline as GB_AaddB above.
GrB_Info GB_AemultB__lxor_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = lxor (x, Bx [p]): apply the binary op with the scalar bound to
// the first argument.  Entries absent from B's bitmap (Bb) are skipped.
GrB_Info GB_bind1st__lxor_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    // hoist the truth value of the bound scalar out of the loop
    const bool xb = (x != 0) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        Cx [p] = (xb != (Bx [p] != 0)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = lxor (Ax [p], y): apply the binary op with the scalar bound to
// the second argument.  Entries absent from A's bitmap (Ab) are skipped.
GrB_Info GB_bind2nd__lxor_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    // hoist the truth value of the bound scalar out of the loop
    const bool yb = (y != 0) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        Cx [p] = ((Ax [p] != 0) != yb) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply cij = lxor (x, aij), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__lxor_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE (generated pattern; same definition as above)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A and apply cij = lxor (aij, y), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__lxor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__le_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint8)
// A*D function (colscale): GB (_AxD__le_uint8)
// D*A function (rowscale): GB (_DxB__le_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint8)
// C=scalar+B GB (_bind1st__le_uint8)
// C=scalar+B' GB (_bind1st_tran__le_uint8)
// C=A+scalar GB (_bind2nd__le_uint8)
// C=A'+scalar GB (_bind2nd_tran__le_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT8 || GxB_NO_LE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; specialized for the LE_UINT8
// operator (cij = (aij <= bij), bool result) by the GB_* macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // operator compiled out via GxB_NO_* flags: caller uses the generic kernel
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  The template is
// compiled out (#if 0) for this operator, so when enabled this function is a
// no-op that reports success — presumably because a bool-valued comparison
// cannot serve as an accumulator here; confirm against the generator.
GrB_Info GB (_Cdense_accumB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  As with _Cdense_accumB
// above, the template is compiled out (#if 0) for this operator, so the
// enabled path is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__le_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with a diagonal matrix D: Cx [p] = (Ax [p] <= D(j,j)).
// Note C is bool while A and D are uint8_t.
GrB_Info GB (_AxD__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with a diagonal matrix D: Cx [p] = (D(i,i) <= Bx [p]).
GrB_Info GB (_DxB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where C contains entries present in
// either A or B.  GB_WERK workspaces are released by GB_FREE_WORK.
GrB_Info GB (_AaddB__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // per-matrix ek-slice workspaces, freed by GB_FREE_WORK on exit
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where C contains entries present in
// both A and B (method 01; the meta file dispatches on sparsity/mask).
GrB_Info GB (_AemultB_01__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#M> = A.*B when A is sparse/hyper and B is bitmap/full.
// flipxy is only honored when GB_BINOP_FLIP is 1; this file defines
// GB_BINOP_FLIP as 0 for LE_UINT8, so the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; the mask's slice arrays drive the parallelism.
GrB_Info GB (_AemultB_03__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__le_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]): apply the LE operator with the scalar bound to the
// first operand.  Entries absent from B's bitmap (Bb) are skipped.
GrB_Info GB (_bind1st__le_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    const uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        const uint8_t bval = Bx [p] ;
        Cx [p] = (x <= bval) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y): apply the LE operator with the scalar bound to the
// second operand.  Entries absent from A's bitmap (Ab) are skipped.
GrB_Info GB (_bind2nd__le_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        const uint8_t aval = Ax [p] ;
        Cx [p] = (aval <= y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x <= aij), via the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__le_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE (generated pattern; same definition as above)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij <= y), via the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
test_verify_rowcols.c | #include "config.h"
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "kseq.h"
KSEQ_INIT(int, read)
#include "parasail.h"
#include "parasail/cpuid.h"
#include "parasail/memory.h"
#include "parasail/matrix_lookup.h"
#include "func_verify_rowcols.h"
static int verbose = 0;
typedef struct gap_score {
int open;
int extend;
} gap_score_t;
gap_score_t gap_scores[] = {
{10,1},
{10,2},
{14,2},
{40,2},
{INT_MIN,INT_MIN}
};
/* Read every sequence from a FASTA/FASTQ file into a heap-allocated array of
 * C strings, with a parallel array of sequence lengths.
 *
 * filename  path of the input file
 * strings_  out: array of strdup'd sequences (caller frees each and the array)
 * sizes_    out: array of sequence lengths, parallel to strings_
 * count_    out: number of sequences read
 *
 * Exits the process on any I/O or allocation failure.  Fix vs. original:
 * the initial malloc results were previously used unchecked. */
static inline void parse_sequences(
        const char *filename,
        char ***strings_,
        unsigned long **sizes_,
        unsigned long *count_)
{
    FILE* fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    unsigned long *sizes = NULL;
    unsigned long count = 0;
    unsigned long memory = 1000;

    fp = fopen(filename, "r");
    if(fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    if (NULL == strings) {
        perror("malloc");
        exit(1);
    }
    sizes = malloc(sizeof(unsigned long) * memory);
    if (NULL == sizes) {
        perror("malloc");
        exit(1);
    }
    seq = kseq_init(fileno(fp));
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        /* grow both arrays geometrically when full */
        if (count >= memory) {
            char **new_strings = NULL;
            unsigned long *new_sizes = NULL;
            memory *= 2;
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(unsigned long) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);

    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}
/* Compute C(n,k) exactly in integer arithmetic.
 * After each loop step, r holds C(original_n, d), so every division is exact
 * (algorithm from http://blog.plover.com/math/choose.html).
 * Returns 0 when k > n.  Improvement: uses the symmetry C(n,k) == C(n,n-k)
 * so intermediate products — and thus the overflow risk — stay as small as
 * possible; the result is unchanged for all inputs that did not overflow. */
static inline unsigned long binomial_coefficient(
        unsigned long n,
        unsigned long k)
{
    unsigned long r = 1;
    unsigned long d;
    if (k > n) {
        return 0;
    }
    if (n - k < k) {
        k = n - k;  /* same value, fewer/smaller multiplications */
    }
    for (d = 1; d <= k; d++) {
        r *= n--;
        r /= d;
    }
    return r;
}
static inline void k_combination2(
unsigned long pos,
unsigned long *a,
unsigned long *b)
{
double s;
double i = floor(sqrt(2.0 * pos)) - 1.0;
if (i <= 1.0) {
i = 1.0;
}
s = i * (i - 1.0) / 2.0;
while (pos - s >= i) {
s += i;
i += 1;
}
*a = (unsigned long)(pos - s);
*b = (unsigned long)(i);
}
/* Return 1 if a[0..size) and b[0..size) differ anywhere, else 0.
 * A size of zero compares equal. */
static inline int diff_array(
        unsigned long size,
        int *a,
        int *b)
{
    unsigned long idx;
    for (idx = 0; idx < size; ++idx) {
        if (a[idx] == b[idx]) {
            continue;
        }
        return 1;   /* first mismatch decides */
    }
    return 0;
}
/* Verify every implementation in function group 'f' against its reference
 * implementation (functions[0].pointer) over all sequence pairs, all
 * substitution matrices (or the single user-supplied matrix_), and all gap
 * presets (or the single user-supplied gap).  Mismatches in score or in the
 * score/matches/similar/length row and column vectors are printed; results
 * that saturated are skipped since they cannot be compared meaningfully. */
static void check_functions(
        parasail_function_group_t f,
        char **sequences,
        unsigned long *sizes,
        unsigned long pair_limit,
        const parasail_matrix_t *matrix_,
        gap_score_t gap)
{
    const parasail_function_info_t *functions = f.fs;
    unsigned long matrix_index = 0;
    unsigned long gap_index = 0;
    unsigned long function_index = 0;
    unsigned long pair_index = 0;
    parasail_function_t *reference_function = NULL;
    const parasail_matrix_t ** matrices = parasail_matrices;
    const parasail_matrix_t * single_matrix[] = {
        matrix_,
        NULL
    };

    /* a user-specified matrix restricts the sweep to just that matrix */
    if (NULL != matrix_) {
        matrices = single_matrix;
    }

    printf("checking %s functions\n", f.name);
    for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) {
        const parasail_matrix_t *matrix = matrices[matrix_index];
        const char *matrixname = matrix->name;
        if (verbose) printf("\t%s\n", matrixname);
        /* the preset table is terminated by {INT_MIN, INT_MIN} */
        for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) {
            int open = gap_scores[gap_index].open;
            int extend = gap_scores[gap_index].extend;
            /* user-specified open/extend override the presets */
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                open = gap.open;
                extend = gap.extend;
            }
            if (verbose) printf("\t\topen=%d extend=%d\n", open, extend);
            reference_function = functions[0].pointer;
            /* index 0 is the reference; compare every other entry to it */
            for (function_index=1;
                    NULL!=functions[function_index].pointer;
                    ++function_index) {
                if (verbose) printf("\t\t\t%s\n", functions[function_index].name);
                unsigned long saturated = 0;
                /* pairs are independent; 'saturated' is the only shared
                 * accumulator and is updated under an atomic below */
#pragma omp parallel for
                for (pair_index=0; pair_index<pair_limit; ++pair_index) {
                    parasail_result_t *reference_result = NULL;
                    parasail_result_t *result = NULL;
                    unsigned long a = 0;
                    unsigned long b = 1;
                    k_combination2(pair_index, &a, &b);
                    //printf("\t\t\t\tpair=%lu (%lu,%lu)\n", pair_index, a, b);
                    reference_result = reference_function(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend,
                            matrix);
                    result = functions[function_index].pointer(
                            sequences[a], sizes[a],
                            sequences[b], sizes[b],
                            open, extend,
                            matrix);
                    if (result->saturated) {
                        /* no point in comparing a result that saturated */
                        parasail_result_free(reference_result);
                        parasail_result_free(result);
#pragma omp atomic
                        saturated += 1;
                        continue;
                    }
                    if (reference_result->score != result->score) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname,
                                    reference_result->score, result->score);
                        }
                    }
                    /* rows have length sizes[b], columns have length sizes[a] */
                    if (diff_array(
                                sizes[b],
                                reference_result->score_row,
                                result->score_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad score row\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (diff_array(
                                sizes[a],
                                reference_result->score_col,
                                result->score_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad score col\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    /* stats vectors are optional; compare only when present */
                    if (reference_result->matches_row
                            && diff_array(
                                sizes[b],
                                reference_result->matches_row,
                                result->matches_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad matches row\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (reference_result->matches_col
                            && diff_array(
                                sizes[a],
                                reference_result->matches_col,
                                result->matches_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad matches col\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (reference_result->similar_row
                            && diff_array(
                                sizes[b],
                                reference_result->similar_row,
                                result->similar_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad similar row\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (reference_result->similar_col
                            && diff_array(
                                sizes[a],
                                reference_result->similar_col,
                                result->similar_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad similar col\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (reference_result->length_row
                            && diff_array(
                                sizes[b],
                                reference_result->length_row,
                                result->length_row)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad length row\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    if (reference_result->length_col
                            && diff_array(
                                sizes[a],
                                reference_result->length_col,
                                result->length_col)) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) bad length col\n",
                                    functions[function_index].name,
                                    a, b, open, extend,
                                    matrixname);
                        }
                    }
                    parasail_result_free(reference_result);
                    parasail_result_free(result);
                }
                if (verbose && saturated) {
                    printf("%s %d %d %s saturated %lu times\n",
                            functions[function_index].name,
                            open, extend,
                            matrixname,
                            saturated);
                }
            }
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                /* user-specified gap, don't loop */
                break;
            }
        }
    }
}
/* Entry point: parse options, load sequences from a FASTA/FASTQ file, and
 * run the rowcol verification sweep for each instruction set available at
 * build time plus the dispatcher functions.
 * Options:
 *   -f FILE  input sequence file (required)
 *   -m NAME  restrict to one named substitution matrix
 *   -n N     sequence count (note: overwritten when the file is parsed)
 *   -o N     gap open penalty; -e N gap extend penalty (both set => no preset sweep)
 *   -v       verbose; -s also test stats variants; -S skip score variants */
int main(int argc, char **argv)
{
    unsigned long i = 0;
    unsigned long seq_count = 0;
    unsigned long limit = 0;
    char **sequences = NULL;
    unsigned long *sizes = NULL;
    char *endptr = NULL;
    char *filename = NULL;
    int c = 0;
    int test_scores = 1;
    int test_stats = 0;
    char *matrixname = NULL;
    const parasail_matrix_t *matrix = NULL;
    gap_score_t gap = {INT_MIN,INT_MIN};

    while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) {
        switch (c) {
            case 'f':
                filename = optarg;
                break;
            case 'm':
                matrixname = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap.open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.open");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap.extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.extend");
                    exit(1);
                }
                break;
            case 'v':
                verbose = 1;
                break;
            case 's':
                test_stats = 1;
                break;
            case 'S':
                test_scores = 0;
                break;
            case '?':
                if (optopt == 'f' || optopt == 'n') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                /* isprint() is declared in <ctype.h> (see include block) */
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n",
                            optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }

    if (filename) {
        parse_sequences(filename, &sequences, &sizes, &seq_count);
    }
    else {
        fprintf(stderr, "no filename specified\n");
        exit(1);
    }

    /* select the matrix */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }

    /* one verification pair per unordered sequence pair */
    limit = binomial_coefficient(seq_count, 2);
    printf("%lu choose 2 is %lu\n", seq_count, limit);

#if HAVE_SSE2
    if (parasail_can_use_sse2()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_sse2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_sse2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_SSE41
    if (parasail_can_use_sse41()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_sse41, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_sse41, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_AVX2
    if (parasail_can_use_avx2()) {
        if (test_scores) {
            check_functions(parasail_nw_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_avx2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_avx2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

#if HAVE_KNC
    {
        /* KNC has no runtime dispatch check; build-time gate only */
        if (test_scores) {
            check_functions(parasail_nw_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_rowcol_knc, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_rowcol_knc, sequences, sizes, limit, matrix, gap);
        }
    }
#endif

    /* dispatcher variants are always available */
    if (test_scores) {
        check_functions(parasail_nw_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_rowcol_disp, sequences, sizes, limit, matrix, gap);
    }
    if (test_stats) {
        check_functions(parasail_nw_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_stats_rowcol_disp, sequences, sizes, limit, matrix, gap);
    }

    /* release the sequences loaded by parse_sequences */
    for (i=0; i<seq_count; ++i) {
        free(sequences[i]);
    }
    free(sequences);
    free(sizes);

    return 0;
}
|
783.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
/* Fill the m x n input matrix with a deterministic pattern and set the
 * normalisation constant used by the correlation kernel. */
int row, col;
*float_n = 1.2;
for (row = 0; row < m; row++) {
for (col = 0; col < n; col++) {
/* Cast binds to `row` only, then multiplies — same as the reference. */
data[row][col] = ((DATA_TYPE) row*col) / M;
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
/* Dump the m x m correlation matrix to stderr. Scanning every live-out
 * element both prevents dead-code elimination and allows output checks. */
int row, col;
for (row = 0; row < m; row++) {
for (col = 0; col < m; col++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[row][col]);
/* Break the line every 20 printed entries. */
if ((row * m + col) % 20 == 0) fprintf (stderr, "\n");
}
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Compute the m x m Pearson correlation matrix of the n x m data matrix:
 * column means, column standard deviations (with near-zero clamping),
 * centring/reduction of the data, then the pairwise dot products.
 * NOTE(review): the `#P11` and `#p` tokens inside the pragmas are not valid
 * OpenMP — presumably placeholders substituted by an autotuning script;
 * this file will not compile as-is. TODO confirm the tooling. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
/* Threshold below which a std. dev. is treated as zero (clamped to 1). */
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
/* NOTE(review): j1 is not in the private() list although it indexes the
 * last distribute loop — verify this is intended. */
#pragma omp parallel private(i, j, j2) num_threads(#P11)
{
#pragma omp target teams distribute #p #p
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
#pragma omp target teams distribute #p #p
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp target teams distribute #p #p
for (i = 0; i < _PB_N; i++)
{
#pragma omp parallel for schedule(dynamic, 28)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
}
/* Calculate the m * m correlation matrix. */
/* Only the upper triangle is computed; each value is mirrored below. */
#pragma omp target teams distribute #p #p
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
/* The loop above stops at M-2; set the final diagonal element here. */
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocate the PolyBench arrays, run the timed correlation kernel,
 * print instruments, and dump live-out data to defeat DCE. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
main.c | #include "../comms.h"
#include "../mesh.h"
#include "../params.h"
#include "../profiler.h"
#include "../shared_data.h"
#include "neutral_interface.h"
#include "papi_multiplex_test.h"
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef MPI
#include "mpi.h"
#endif
void plot_particle_density(NeutralData* neutral_data, Mesh* mesh, const int tt,
const int nparticles, const double elapsed_sim_time);
int main(int argc, char** argv) {
if (argc != 4) {
TERMINATE("usage: ./neutral.exe <param_file> arch mode\n");
}
int arch = atoi(argv[2]);
char platform[1024];
int mode = atoi(argv[3]);
char filename[1024];
if(arch == 0)
{
sprintf(platform, "kp920");
if(mode == 0)
{
sprintf(filename, "/mnt/share/JKChen/PAPI_multiplex_test/events/%s/%s_events_mpx.txt", platform, platform);
}
else
{
sprintf(filename, "/mnt/share/JKChen/PAPI_multiplex_test/events/%s/%s_events_ocoe_%d.txt", platform, platform, mode);
}
}
else if(arch == 1)
{
sprintf(platform, "hsw");
if(mode == 0)
{
sprintf(filename, "/chpc/users/JKChen/PAPI_multiplex_test/events/%s/%s_events_mpx.txt", platform, platform);
}
else
{
sprintf(filename, "/chpc/users/JKChen/PAPI_multiplex_test/events/%s/%s_events_ocoe_%d.txt", platform, platform, mode);
}
}
mytest_papi_init(filename, "neutral", mode);
// Store the dimensions of the mesh
Mesh mesh;
NeutralData neutral_data;
neutral_data.neutral_params_filename = argv[1];
mesh.global_nx =
get_int_parameter("nx", neutral_data.neutral_params_filename);
mesh.global_ny =
get_int_parameter("ny", neutral_data.neutral_params_filename);
mesh.pad = 0;
mesh.local_nx = mesh.global_nx + 2 * mesh.pad;
mesh.local_ny = mesh.global_ny + 2 * mesh.pad;
mesh.width = get_double_parameter("width", ARCH_ROOT_PARAMS);
mesh.height = get_double_parameter("height", ARCH_ROOT_PARAMS);
mesh.dt = get_double_parameter("dt", neutral_data.neutral_params_filename);
mesh.sim_end = get_double_parameter("sim_end", ARCH_ROOT_PARAMS);
mesh.niters =
get_int_parameter("iterations", neutral_data.neutral_params_filename);
mesh.rank = MASTER;
mesh.nranks = 1;
mesh.ndims = 2;
const int visit_dump =
get_int_parameter("visit_dump", neutral_data.neutral_params_filename);
// Get the number of threads and initialise the random number pool
#ifdef _OPENMP
#pragma omp parallel
{ neutral_data.nthreads = omp_get_num_threads(); }
#else
neutral_data.nthreads = 1;
#endif
printf("Starting up with %d OpenMP threads.\n", neutral_data.nthreads);
printf("Loading problem from %s.\n", neutral_data.neutral_params_filename);
#ifdef ENABLE_PROFILING
/* The timing code has to be called so many times that the API calls
* actually begin to influence the performance dramatically. */
fprintf(stderr,
"Warning. Profiling is enabled and will increase the runtime.\n\n");
#endif
// Perform the general initialisation steps for the mesh etc
initialise_mpi(argc, argv, &mesh.rank, &mesh.nranks);
initialise_devices(mesh.rank);
initialise_comms(&mesh);
initialise_mesh_2d(&mesh);
SharedData shared_data = {0};
initialise_shared_data_2d(mesh.local_nx, mesh.local_ny, mesh.pad, mesh.width,
mesh.height, neutral_data.neutral_params_filename, mesh.edgex, mesh.edgey, &shared_data);
handle_boundary_2d(mesh.local_nx, mesh.local_ny, &mesh, shared_data.density,
NO_INVERT, PACK);
initialise_neutral_data(&neutral_data, &mesh);
// Make sure initialisation phase is complete
barrier();
// Main timestep loop where we will track each particle through time
int tt;
double wallclock = 0.0;
double elapsed_sim_time = 0.0;
struct Profile profile;
for (tt = 1; tt <= mesh.niters; ++tt) {
if (mesh.rank == MASTER) {
printf("\nIteration %d\n", tt);
}
if (visit_dump) {
plot_particle_density(&neutral_data, &mesh, tt, neutral_data.nparticles,
elapsed_sim_time);
}
uint64_t facet_events = 0;
uint64_t collision_events = 0;
START_PROFILING(&profile);
// Begin the main solve step
solve_transport_2d(
mesh.local_nx - 2 * mesh.pad, mesh.local_ny - 2 * mesh.pad,
mesh.global_nx, mesh.global_ny, tt, mesh.pad, mesh.x_off, mesh.y_off,
mesh.dt, neutral_data.nparticles, &neutral_data.nlocal_particles,
mesh.neighbours, neutral_data.local_particles,
shared_data.density, mesh.edgex, mesh.edgey, mesh.edgedx, mesh.edgedy,
neutral_data.cs_scatter_table, neutral_data.cs_absorb_table,
neutral_data.energy_deposition_tally, neutral_data.nfacets_reduce_array,
neutral_data.ncollisions_reduce_array, neutral_data.nprocessed_reduce_array,
&facet_events, &collision_events);
barrier();
const char p = '0' + tt;
STOP_PROFILING(&profile, &p);
double step_time = profile.profiler_entries[tt-1].time;
wallclock += step_time;
printf("Step time %.4fs\n", step_time);
printf("Wallclock %.4fs\n", wallclock);
printf("Facets %lu\n", facet_events);
printf("Collisions %lu\n", collision_events);
// Note that this metric is only valid in the single event case
printf("Facet Events / s %.2e\n", facet_events / step_time);
printf("Collision Events / s %.2e\n", collision_events / step_time);
elapsed_sim_time += mesh.dt;
if (visit_dump) {
char tally_name[100];
sprintf(tally_name, "energy%d", tt);
int dneighbours[NNEIGHBOURS] = {EDGE, EDGE, EDGE, EDGE, EDGE, EDGE};
write_all_ranks_to_visit(
mesh.global_nx, mesh.global_ny, mesh.local_nx - 2 * mesh.pad,
mesh.local_ny - 2 * mesh.pad, mesh.pad, mesh.x_off, mesh.y_off,
mesh.rank, mesh.nranks, dneighbours,
neutral_data.energy_deposition_tally, tally_name, 0,
elapsed_sim_time);
}
// Leave the simulation if we have reached the simulation end time
if (elapsed_sim_time >= mesh.sim_end) {
if (mesh.rank == MASTER)
printf("Reached end of simulation time\n");
break;
}
}
if (visit_dump) {
plot_particle_density(&neutral_data, &mesh, tt, neutral_data.nparticles,
elapsed_sim_time);
}
validate(mesh.local_nx - 2 * mesh.pad, mesh.local_ny - 2 * mesh.pad,
neutral_data.neutral_params_filename, mesh.rank,
neutral_data.energy_deposition_tally);
if (mesh.rank == MASTER) {
//PRINT_PROFILING_RESULTS(&p);
printf("Final Wallclock %.9fs\n", wallclock);
printf("Elapsed Simulation Time %.6fs\n", elapsed_sim_time);
}
mytest_papi_stop();
return 0;
}
// This is a bit hacky and temporary for now
/*
 * Histograms the particles onto mesh cells and writes the density field to a
 * VisIt dump named "particles<tt>".
 *
 * Fix: the accumulation buffer was malloc'd and then incremented with `+=`
 * without ever being zeroed, so the plotted densities started from garbage.
 * calloc guarantees a zero-initialised histogram.
 */
void plot_particle_density(NeutralData* neutral_data, Mesh* mesh, const int tt,
                           const int nparticles,
                           const double elapsed_sim_time) {
  /* Zero-initialised: each cell's count must start at 0. */
  double* temp =
      (double*)calloc(mesh->local_nx * mesh->local_ny, sizeof(double));
  if (!temp) {
    TERMINATE("Could not allocate data for printing.\n");
  }
  for (int ii = 0; ii < nparticles; ++ii) {
    Particle* particle = &neutral_data->local_particles[ii];
#ifdef SoA
    const int cellx = particle->cellx[ii] - mesh->x_off;
    const int celly = particle->celly[ii] - mesh->y_off;
#else
    const int cellx = particle->cellx - mesh->x_off;
    const int celly = particle->celly - mesh->y_off;
#endif
    /* NOTE(review): the row stride here is (local_nx - 2*pad) while the
     * allocation uses local_nx; harmless while pad == 0, but inconsistent
     * for padded meshes — confirm before enabling padding. */
    temp[celly * (mesh->local_nx - 2 * mesh->pad) + cellx] += 1.0;
  }
  // Dummy neighbours that stops any padding from happening
  int neighbours[NNEIGHBOURS] = {EDGE, EDGE, EDGE, EDGE, EDGE, EDGE};
  char particles_name[100];
  sprintf(particles_name, "particles%d", tt);
  write_all_ranks_to_visit(
      mesh->global_nx, mesh->global_ny, mesh->local_nx - 2 * mesh->pad,
      mesh->local_ny - 2 * mesh->pad, mesh->pad, mesh->x_off, mesh->y_off,
      mesh->rank, mesh->nranks, neighbours, temp, particles_name, 0,
      elapsed_sim_time);
  free(temp);
}
|
dead.c | Rgb getColorFLT( float colorPoly, int rgb, int n, int maxiter ) {
switch ( rgb ) {
case 1:
return getRgbFromTable( n, maxiter );
case 2:
return getRgbSmooth( n, maxiter );
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
return getRgbSmoothCPFLT( colorPoly, n, maxiter );
case 9:
return getRgb2( n, maxiter );
case 10:
return getRgb3( n, maxiter );
case 11:
return getRgb4( n, maxiter );
case 12:
return getRgb5( n, maxiter );
case 13:
return getRgbSmoothCPFLT( colorPoly, n, maxiter );
default:
return getRgb( n, maxiter );
}
}
Rgb getColorDBL( double colorPoly, int rgb, int n, int maxiter ) {
    /* Dispatch to the colouring scheme selected by `rgb`; schemes 3-8 and
     * 13 use the double colour-polynomial variant. */
    if ( rgb == 1 )
        return getRgbFromTable( n, maxiter );
    if ( rgb == 2 )
        return getRgbSmooth( n, maxiter );
    if ( ( rgb >= 3 && rgb <= 8 ) || rgb == 13 )
        return getRgbSmoothCPDBL( colorPoly, n, maxiter );
    if ( rgb == 9 )
        return getRgb2( n, maxiter );
    if ( rgb == 10 )
        return getRgb3( n, maxiter );
    if ( rgb == 11 )
        return getRgb4( n, maxiter );
    if ( rgb == 12 )
        return getRgb5( n, maxiter );
    /* Anything unrecognised falls back to the basic palette. */
    return getRgb( n, maxiter );
}
Rgb getColorLDBL( long double colorPoly, int rgb, int n, int maxiter ) {
    /* Dispatch to the colouring scheme selected by `rgb`; schemes 3-8 and
     * 13 use the long-double colour-polynomial variant. */
    if ( rgb == 1 )
        return getRgbFromTable( n, maxiter );
    if ( rgb == 2 )
        return getRgbSmooth( n, maxiter );
    if ( ( rgb >= 3 && rgb <= 8 ) || rgb == 13 )
        return getRgbSmoothCPLDBL( colorPoly, n, maxiter );
    if ( rgb == 9 )
        return getRgb2( n, maxiter );
    if ( rgb == 10 )
        return getRgb3( n, maxiter );
    if ( rgb == 11 )
        return getRgb4( n, maxiter );
    if ( rgb == 12 )
        return getRgb5( n, maxiter );
    /* Anything unrecognised falls back to the basic palette. */
    return getRgb( n, maxiter );
}
Rgb getColor128( __float128 colorPoly, int rgb, int n, int maxiter ) {
    /* Dispatch to the colouring scheme selected by `rgb`; schemes 3-8 and
     * 13 use the __float128 colour-polynomial variant. */
    if ( rgb == 1 )
        return getRgbFromTable( n, maxiter );
    if ( rgb == 2 )
        return getRgbSmooth( n, maxiter );
    if ( ( rgb >= 3 && rgb <= 8 ) || rgb == 13 )
        return getRgbSmoothCP128( colorPoly, n, maxiter );
    if ( rgb == 9 )
        return getRgb2( n, maxiter );
    if ( rgb == 10 )
        return getRgb3( n, maxiter );
    if ( rgb == 11 )
        return getRgb4( n, maxiter );
    if ( rgb == 12 )
        return getRgb5( n, maxiter );
    /* Anything unrecognised falls back to the basic palette. */
    return getRgb( n, maxiter );
}
/* Render the Mandelbrot set at double precision into the global gd image
 * `im` (escape-time iteration z -> z^2 + c per pixel), parallelised over
 * image rows with OpenMP. Returns a copy of P with nMax updated to the
 * largest escape count observed.
 * NOTE(review): g.nMax is read-modified-written by all threads without
 * synchronisation — looks like a benign-statistics race, but confirm. */
Parameters msetDBL( Parameters P ) {
Parameters g = P;
im = gdImageCreateTrueColor( g.width, g.height );
#ifdef AXIS
AxesDBL ctaxes = ct_gain_axesDBL( g.centerX, g.centerY, g.diameter, g.height, g.width );
#endif
#pragma omp parallel shared(im)
{
ColorDBL cp;
#pragma omp for schedule(dynamic)
for (int Y = 0; Y < g.height; Y++ ) {
#ifdef AXIS
cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor;
#else
double y = ( Y - g.height2 ) / ( g.height2 );
#endif
for (int X = 0; X < g.width; X++ ) {
#ifdef AXIS
cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor;
#else
/* Map the pixel to the complex plane via center/radius. */
double x = ( X - g.width2 ) / ( g.height2 );
complex double c = g.center + g.radius * ( x - I * y );
cp.Cx = creal( c );
cp.Cy = cimag( c );
#endif
cp.Zx = 0.0;
cp.Zy = 0.0;
cp.colorPoly = 0;
cp.Exps = 0;
double Zx2;
double Zy2;
/* Escape-time loop: stop when |z|^2 exceeds the escape radius. */
for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) {
Zx2 = cp.Zx * cp.Zx;
Zy2 = cp.Zy * cp.Zy;
if ( ( Zx2 + Zy2 ) > g.escape )
break;
cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy;
cp.Zx = Zx2 - Zy2 + cp.Cx;
/* Accumulated exponential used by exterior-distance colourings. */
cp.Exps += exp( Zx2 + Zy2 );
}
if ( cp.n < g.maxiter ) {
g.nMax = max( cp.n, g.nMax );
}
gdImageSetPixel( im, X, Y, getfColorDBL( g, cp ) );
}
}
}
return g;
}
/* Long-double precision variant of msetDBL: renders the Mandelbrot set into
 * the global gd image `im`, OpenMP-parallel over rows; returns a copy of P
 * with nMax updated.
 * NOTE(review): same unsynchronised g.nMax update as msetDBL — confirm. */
Parameters msetLDBL( Parameters P ) {
Parameters g = P;
im = gdImageCreateTrueColor( g.width, g.height );
#ifdef AXIS
AxesLDBL ctaxes = ct_gain_axesLDBL( g.centerX, g.centerY, g.diameter, g.height, g.width );
#endif
#pragma omp parallel shared(im)
{
ColorLDBL cp;
#pragma omp for schedule(dynamic)
for (int Y = 0; Y < g.height; Y++ ) {
#ifdef AXIS
cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor;
#else
long double y = ( Y - g.height2 ) / ( g.height2 );
#endif
for (int X = 0; X < g.width; X++ ) {
#ifdef AXIS
cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor;
#else
/* Map the pixel to the complex plane via center/radius. */
long double x = ( X - g.width2 ) / ( g.height2 );
complex long double c = g.center + g.radius * ( x - I * y );
cp.Cx = creal( c );
cp.Cy = cimag( c );
#endif
cp.Zx = 0.0;
cp.Zy = 0.0;
cp.colorPoly = 0;
cp.Exps = 0;
long double Zx2;
long double Zy2;
/* Escape-time loop: stop when |z|^2 exceeds the escape radius. */
for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) {
Zx2 = cp.Zx * cp.Zx;
Zy2 = cp.Zy * cp.Zy;
if ( ( Zx2 + Zy2 ) > g.escape )
break;
cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy;
cp.Zx = Zx2 - Zy2 + cp.Cx;
cp.Exps += exp( Zx2 + Zy2 );
}
if ( cp.n < g.maxiter ) {
g.nMax = max( cp.n, g.nMax );
}
gdImageSetPixel( im, X, Y, getfColorLDBL( g, cp ) );
}
}
}
return g;
}
/* __float128 precision variant of msetDBL: renders the Mandelbrot set into
 * the global gd image `im`, OpenMP-parallel over rows; returns a copy of P
 * with nMax updated.
 * NOTE(review): same unsynchronised g.nMax update as msetDBL — confirm.
 * NOTE(review): exp() here takes a __float128 argument — presumably the
 * double version is intended (or expq); verify against quadmath usage. */
Parameters msetFLT128( Parameters P ) {
Parameters g = P;
im = gdImageCreateTrueColor( g.width, g.height );
#ifdef AXIS
AxesFLT128 ctaxes = ct_gain_axesFLT128( g.centerX, g.centerY, g.diameter, g.height, g.width );
#endif
#pragma omp parallel shared(im)
{
Color128 cp;
#pragma omp for schedule(dynamic)
for (int Y = 0; Y < g.height; Y++ ) {
#ifdef AXIS
cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor;
#else
__float128 y = ( Y - g.height2 ) / ( g.height2 );
#endif
for (int X = 0; X < g.width; X++ ) {
#ifdef AXIS
cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor;
#else
/* Map the pixel to the complex plane via center/radius. */
__float128 x = ( X - g.width2 ) / ( g.height2 );
__complex128 c = g.center + g.radius * ( x - I * y );
cp.Cx = creal( c );
cp.Cy = cimag( c );
#endif
cp.Zx = 0.0;
cp.Zy = 0.0;
cp.colorPoly = 0;
cp.Exps = 0;
__float128 Zx2;
__float128 Zy2;
/* Escape-time loop: stop when |z|^2 exceeds the escape radius. */
for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) {
Zx2 = cp.Zx * cp.Zx;
Zy2 = cp.Zy * cp.Zy;
if ( ( Zx2 + Zy2 ) > g.escape )
break;
cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy;
cp.Zx = Zx2 - Zy2 + cp.Cx;
cp.Exps += exp( Zx2 + Zy2 );
}
if ( cp.n < g.maxiter ) {
g.nMax = max( cp.n, g.nMax );
}
gdImageSetPixel( im, X, Y, getfColor128( g, cp ) );
}
}
}
return g;
}
#ifdef AXIS
/* Build a float-precision complex-plane viewport of span `diameter` centred
 * on (real, imag), widen one dimension so the window aspect matches the
 * image aspect, and derive per-pixel step factors.
 * Fix: xmin previously used `real - diameter`, producing an off-centre
 * window 1.5x the requested width; use `real - radius`, consistent with
 * ymin/ymax here and with ct_gain_axesFLT128. */
AxesFLT ct_gain_axesFLT( float real, float imag, float diameter, int height, int width ) {
float radius = diameter / 2.0;
AxesFLT ctaxes = {
real - radius,
real + radius,
imag - radius,
imag + radius,
0.0,
0.0,
};
float ctwidth = ctaxes.xmax - ctaxes.xmin;
float ctheight = ctaxes.ymax - ctaxes.ymin;
float ctdaspect = fabsf( ( float ) height / ( float ) width );
float ctwaspect = fabsf( ctheight / ctwidth );
if ( ctdaspect > ctwaspect ) {
/* Image is taller than the window: pad y symmetrically. */
float excess = ctheight * ( ctdaspect / ctwaspect - 1 );
ctaxes.ymax += excess / 2;
ctaxes.ymin -= excess / 2;
} else if ( ctdaspect < ctwaspect ) {
/* Image is wider than the window: pad x symmetrically. */
float excess = ctwidth * ( ctwaspect / ctdaspect - 1 );
ctaxes.xmax += excess / 2;
ctaxes.xmin -= excess / 2;
}
ctwidth = ctaxes.xmax - ctaxes.xmin;
ctheight = ctaxes.ymax - ctaxes.ymin;
/* Divide by (N - 1) so the last pixel lands on xmax/ymax; guard N == 1. */
ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width );
ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height );
return ctaxes;
}
/* Double-precision viewport builder: span `diameter` centred on
 * (real, imag), aspect-corrected to the image, with per-pixel factors.
 * Fix: xmin previously used `real - diameter` (off-centre, 1.5x-wide
 * window); use `real - radius`, matching ymin/ymax and ct_gain_axesFLT128. */
AxesDBL ct_gain_axesDBL( double real, double imag, double diameter, int height, int width ) {
double radius = diameter / 2.0;
AxesDBL ctaxes = {
real - radius,
real + radius,
imag - radius,
imag + radius,
0.0,
0.0,
};
double ctwidth = ctaxes.xmax - ctaxes.xmin;
double ctheight = ctaxes.ymax - ctaxes.ymin;
double ctdaspect = fabs( ( double ) height / ( double ) width );
double ctwaspect = fabs( ctheight / ctwidth );
if ( ctdaspect > ctwaspect ) {
/* Image is taller than the window: pad y symmetrically. */
double excess = ctheight * ( ctdaspect / ctwaspect - 1 );
ctaxes.ymax += excess / 2;
ctaxes.ymin -= excess / 2;
} else if ( ctdaspect < ctwaspect ) {
/* Image is wider than the window: pad x symmetrically. */
double excess = ctwidth * ( ctwaspect / ctdaspect - 1 );
ctaxes.xmax += excess / 2;
ctaxes.xmin -= excess / 2;
}
ctwidth = ctaxes.xmax - ctaxes.xmin;
ctheight = ctaxes.ymax - ctaxes.ymin;
/* Divide by (N - 1) so the last pixel lands on xmax/ymax; guard N == 1. */
ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width );
ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height );
return ctaxes;
}
/* Long-double viewport builder: span `diameter` centred on (real, imag),
 * aspect-corrected to the image, with per-pixel factors.
 * Fix: xmin previously used `real - diameter` (off-centre, 1.5x-wide
 * window); use `real - radius`, matching ymin/ymax and ct_gain_axesFLT128. */
AxesLDBL ct_gain_axesLDBL( long double real, long double imag, long double diameter, int height, int width ) {
long double radius = diameter / 2.0;
AxesLDBL ctaxes = {
real - radius,
real + radius,
imag - radius,
imag + radius,
0.0,
0.0,
};
long double ctwidth = ctaxes.xmax - ctaxes.xmin;
long double ctheight = ctaxes.ymax - ctaxes.ymin;
long double ctdaspect = fabsl( ( long double ) height / ( long double ) width );
long double ctwaspect = fabsl( ctheight / ctwidth );
if ( ctdaspect > ctwaspect ) {
/* Image is taller than the window: pad y symmetrically. */
long double excess = ctheight * ( ctdaspect / ctwaspect - 1 );
ctaxes.ymax += excess / 2;
ctaxes.ymin -= excess / 2;
} else if ( ctdaspect < ctwaspect ) {
/* Image is wider than the window: pad x symmetrically. */
long double excess = ctwidth * ( ctwaspect / ctdaspect - 1 );
ctaxes.xmax += excess / 2;
ctaxes.xmin -= excess / 2;
}
ctwidth = ctaxes.xmax - ctaxes.xmin;
ctheight = ctaxes.ymax - ctaxes.ymin;
/* Divide by (N - 1) so the last pixel lands on xmax/ymax; guard N == 1. */
ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width );
ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height );
return ctaxes;
}
/* __float128 viewport builder: span `diameter` centred on (real, imag),
 * aspect-corrected to the image, with per-pixel step factors. */
AxesFLT128 ct_gain_axesFLT128( __float128 real, __float128 imag, __float128 diameter, int height, int width ) {
const __float128 half_span = diameter / 2.0;
AxesFLT128 axes;
axes.xmin = real - half_span;
axes.xmax = real + half_span;
axes.ymin = imag - half_span;
axes.ymax = imag + half_span;
axes.ctxfactor = 0.0;
axes.ctyfactor = 0.0;
__float128 span_x = axes.xmax - axes.xmin;
__float128 span_y = axes.ymax - axes.ymin;
__float128 image_aspect = fabsq( ( __float128 ) height / ( __float128 ) width );
__float128 window_aspect = fabsq( span_y / span_x );
if ( image_aspect > window_aspect ) {
/* Image is taller than the window: pad y symmetrically. */
__float128 pad = span_y * ( image_aspect / window_aspect - 1 );
axes.ymax += pad / 2;
axes.ymin -= pad / 2;
} else if ( image_aspect < window_aspect ) {
/* Image is wider than the window: pad x symmetrically. */
__float128 pad = span_x * ( window_aspect / image_aspect - 1 );
axes.xmax += pad / 2;
axes.xmin -= pad / 2;
}
span_x = axes.xmax - axes.xmin;
span_y = axes.ymax - axes.ymin;
/* Divide by (N - 1) so the last pixel lands on xmax/ymax; guard N == 1. */
axes.ctxfactor = span_x / ( ( width > 1 ) ? ( width - 1 ) : width );
axes.ctyfactor = span_y / ( ( height > 1 ) ? ( height - 1 ) : height );
return axes;
}
#endif
#ifdef AXIS
/* Complex-plane viewport descriptors, one per floating-point precision:
 * [xmin, xmax] x [ymin, ymax] bounds plus the per-pixel step factors
 * produced by the matching ct_gain_axes* builder. */
typedef struct {
__float128 xmin;
__float128 xmax;
__float128 ymin;
__float128 ymax;
__float128 ctxfactor;
__float128 ctyfactor;
} AxesFLT128;
typedef struct {
long double xmin;
long double xmax;
long double ymin;
long double ymax;
long double ctxfactor;
long double ctyfactor;
} AxesLDBL;
typedef struct {
double xmin;
double xmax;
double ymin;
double ymax;
double ctxfactor;
double ctyfactor;
} AxesDBL;
typedef struct {
float xmin;
float xmax;
float ymin;
float ymax;
float ctxfactor;
float ctyfactor;
} AxesFLT;
#endif
#ifdef AXIS
/* Prototypes for the per-precision viewport builders. */
AxesDBL ct_gain_axesDBL( double real, double imag, double diameter, int height, int width );
AxesFLT ct_gain_axesFLT( float real, float imag, float diameter, int height, int width );
AxesFLT128 ct_gain_axesFLT128( __float128 real, __float128 imag, __float128 diameter, int height, int width );
AxesLDBL ct_gain_axesLDBL( long double real, long double imag, long double diameter, int height, int width );
#endif
#ifdef COORD
/* Pixel -> complex-plane mapping helpers (one per precision): normalise the
 * pixel (i, j) by half the image height, then scale/translate by the
 * complex center and radius stored in Parameters. */
void coordinateLDBL( long double *Cx, long double *Cy, int i, int j, Parameters g ) {
long double x = ( i - g.width / 2.0 ) / ( g.height / 2.0 );
long double y = ( j - g.height / 2.0 ) / ( g.height / 2.0 );
complex long double c = g.center + g.radius * ( x - I * y );
*Cx = creal( c );
*Cy = cimag( c );
}
void coordinateDBL( double *Cx, double *Cy, int i, int j, Parameters g ) {
double x = ( i - g.width / 2.0 ) / ( g.height / 2.0 );
double y = ( j - g.height / 2.0 ) / ( g.height / 2.0 );
complex double c = g.center + g.radius * ( x - I * y );
*Cx = creal( c );
*Cy = cimag( c );
}
void coordinateFLT( float *Cx, float *Cy, int i, int j, Parameters g ) {
float x = ( i - g.width / 2.0 ) / ( g.height / 2.0 );
float y = ( j - g.height / 2.0 ) / ( g.height / 2.0 );
/* NOTE(review): bare `complex` (double complex) here, unlike the other
 * variants which match their precision — confirm this is intended. */
complex c = g.center + g.radius * ( x - I * y );
*Cx = creal( c );
*Cy = cimag( c );
}
void coordinateFLT128( __float128 *Cx, __float128 *Cy, int i, int j, Parameters g ) {
__float128 x = ( i - g.width / 2.0 ) / ( g.height / 2.0 );
__float128 y = ( j - g.height / 2.0 ) / ( g.height / 2.0 );
__complex128 c = g.center + g.radius * ( x - I * y );
*Cx = creal( c );
*Cy = cimag( c );
}
#endif
|
region_layer.c | #include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "dark_cuda.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#define DOABS 1
/* Construct a YOLOv2-style region layer.
 * batch: images per batch; w,h: grid width/height; n: anchors per cell;
 * classes: class count; coords: box coordinates per prediction (4);
 * max_boxes: max ground-truth boxes per image.
 * Output size is h*w*n*(classes + coords + 1) floats per image. */
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
region_layer l = { (LAYER_TYPE)0 };
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
l.c = n*(classes + coords + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
l.coords = coords;
l.cost = (float*)xcalloc(1, sizeof(float));
l.biases = (float*)xcalloc(n * 2, sizeof(float));
l.bias_updates = (float*)xcalloc(n * 2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
l.max_boxes = max_boxes;
/* Each truth record is 4 box coords + class id + one extra slot. */
l.truth_size = 4 + 2;
l.truths = max_boxes*l.truth_size;
l.delta = (float*)xcalloc(batch * l.outputs, sizeof(float));
l.output = (float*)xcalloc(batch * l.outputs, sizeof(float));
int i;
/* Default anchor priors: 0.5 x 0.5 until overridden by the cfg. */
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
l.backward = backward_region_layer;
#ifdef GPU
l.forward_gpu = forward_region_layer_gpu;
l.backward_gpu = backward_region_layer_gpu;
l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif
fprintf(stderr, "detection\n");
/* NOTE(review): time() needs <time.h>, which is not included here —
 * presumably pulled in via utils.h/dark_cuda.h; confirm. */
srand(time(0));
return l;
}
/* Resize a region layer to a new grid size (w, h), reallocating the CPU
 * output/delta buffers (and the GPU mirrors when built with GPU support). */
void resize_region_layer(layer *l, int w, int h)
{
#ifdef GPU
int old_w = l->w;
int old_h = l->h;
#endif
l->w = w;
l->h = h;
l->outputs = h*w*l->n*(l->classes + l->coords + 1);
l->inputs = l->outputs;
l->output = (float*)xrealloc(l->output, l->batch * l->outputs * sizeof(float));
l->delta = (float*)xrealloc(l->delta, l->batch * l->outputs * sizeof(float));
#ifdef GPU
/* GPU buffers are unconditionally re-created at the new size. */
//if (old_w < w || old_h < h)
{
cuda_free(l->delta_gpu);
cuda_free(l->output_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
}
#endif
}
/* Decode one predicted box at raw-output offset `index` for anchor `n` in
 * grid cell (i, j); x/y are sigmoid offsets normalised by the grid size,
 * w/h are exponential scalings of the anchor priors (divided by the grid
 * size when DOABS is set). */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
const float *raw = x + index;
box b;
b.x = (i + logistic_activate(raw[0])) / w;
b.y = (j + logistic_activate(raw[1])) / h;
/* Keep intermediates in double so rounding matches the direct expression. */
double bw = exp(raw[2]) * biases[2*n];
double bh = exp(raw[3]) * biases[2*n + 1];
if (DOABS) {
bw /= w;
bh /= h;
}
b.w = bw;
b.h = bh;
return b;
}
/* Write the coordinate-loss gradients for one predicted box against a
 * ground-truth box into `delta`, scaled by `scale`; returns the IoU of the
 * current prediction with the truth. x/y targets pass through the logistic
 * gradient; w/h targets are in log-space relative to the anchor priors. */
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
box pred = get_region_box(x, biases, n, index, i, j, w, h);
float iou = box_iou(pred, truth);
/* Targets in cell-local coordinates. */
float tx = (truth.x*w - i);
float ty = (truth.y*h - j);
float tw = log(truth.w / biases[2*n]);
float th = log(truth.h / biases[2*n + 1]);
if(DOABS){
/* Absolute mode: truth w/h are grid-normalised, so scale back up. */
tw = log(truth.w*w / biases[2*n]);
th = log(truth.h*h / biases[2*n + 1]);
}
delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
delta[index + 2] = scale * (tw - x[index + 2]);
delta[index + 3] = scale * (th - x[index + 3]);
return iou;
}
/* Write classification gradients for one prediction into `delta` starting
 * at `index`, and accumulate the predicted probability of the true class
 * into *avg_cat. Three modes: hierarchical softmax-tree, focal loss, or
 * plain one-vs-all squared-error-style deltas. */
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
int i, n;
if(hier){
/* Hierarchical mode: walk from the leaf class up through its parents,
 * pushing each sibling group toward one-hot at every level. */
float pred = 1;
while(class_id >= 0){
pred *= output[index + class_id];
int g = hier->group[class_id];
int offset = hier->group_offset[g];
for(i = 0; i < hier->group_size[g]; ++i){
delta[index + offset + i] = scale * (0 - output[index + offset + i]);
}
delta[index + class_id] = scale * (1 - output[index + class_id]);
class_id = hier->parent[class_id];
}
*avg_cat += pred;
} else {
// Focal loss
if (focal_loss) {
// Focal Loss
float alpha = 0.5; // 0.25 or 0.5
//float gamma = 2; // hardcoded in many places of the grad-formula
int ti = index + class_id;
/* Epsilon keeps logf(pt) finite when the prediction is exactly 0. */
float pt = output[ti] + 0.000000000000001F;
// http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832
//float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
delta[index + n] *= alpha*grad;
if (n == class_id) *avg_cat += output[index + n];
}
}
else {
// default
/* One-hot target minus prediction for every class. */
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
if (n == class_id) *avg_cat += output[index + n];
}
}
}
}
float logit(float x)
{
return log(x/(1.-x));
}
/* Returns 1.0f when x is NaN, else 0.0f — (x != x) holds only for NaN. */
float tisnan(float x)
{
    return (x != x) ? 1.0f : 0.0f;
}
/* Map (batch, flat cell location, channel entry) to a flat offset in the
 * layer's output: outputs are laid out anchor-major, then entry, then cell. */
static int entry_index(layer l, int batch, int location, int entry)
{
    const int cell_count = l.w*l.h;
    const int anchor = location / cell_count;
    const int cell = location % cell_count;
    return batch*l.outputs + anchor*cell_count*(l.coords + l.classes + 1) + entry*cell_count + cell;
}
void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);
/* Forward pass (and, when training, loss/gradient computation) for the
 * region layer. Inference: copy input, apply logistic to objectness and
 * softmax to class scores. Training: additionally fill l.delta with
 * objectness, coordinate and class gradients and set *l.cost. */
void forward_region_layer(const region_layer l, network_state state)
{
int i,j,b,t,n;
/* Floats per prediction: 4 coords + objectness + classes. */
int size = l.coords + l.classes + 1;
memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
/* CPU path works on a prediction-major layout; unflattened at the end. */
flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
/* Squash the objectness score of every prediction. */
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
l.output[index + 4] = logistic_activate(l.output[index + 4]);
}
}
#ifndef GPU
if (l.softmax_tree){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
}
}
} else if (l.softmax){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
}
}
}
#endif
/* Inference stops here; everything below builds training gradients. */
if(!state.train) return;
memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
float avg_iou = 0;
float recall = 0;
float avg_cat = 0;
float avg_obj = 0;
float avg_anyobj = 0;
int count = 0;
int class_count = 0;
*(l.cost) = 0;
for (b = 0; b < l.batch; ++b) {
if(l.softmax_tree){
/* Special case: a truth with huge x/y is a classification-only label;
 * find the best-scoring prediction for that class and train only it. */
int onlyclass_id = 0;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
if(!truth.x) break; // continue;
int class_id = state.truth[t*l.truth_size + b*l.truths + 4];
float maxp = 0;
int maxi = 0;
if(truth.x > 100000 && truth.y > 100000){
for(n = 0; n < l.n*l.w*l.h; ++n){
int index = size*n + b*l.outputs + 5;
float scale = l.output[index-1];
float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
if(p > maxp){
maxp = p;
maxi = n;
}
}
int index = size*maxi + b*l.outputs + 5;
delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++class_count;
onlyclass_id = 1;
break;
}
}
if(onlyclass_id) continue;
}
/* Pass 1: every prediction gets a no-object gradient unless it overlaps
 * some truth above l.thresh (then the objectness delta is zeroed). */
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
float best_iou = 0;
int best_class_id = -1;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
int class_id = state.truth[t * l.truth_size + b*l.truths + 4];
if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file
if(!truth.x) break; // continue;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
best_class_id = state.truth[t*l.truth_size + b*l.truths + 4];
best_iou = iou;
}
}
avg_anyobj += l.output[index + 4];
l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
else{
if (best_iou > l.thresh) {
l.delta[index + 4] = 0;
if(l.classfix > 0){
delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
++class_count;
}
}
}
/* Early in training, gently pull every box toward its anchor prior. */
if(*(state.net.seen) < 12800){
box truth = {0};
truth.x = (i + .5)/l.w;
truth.y = (j + .5)/l.h;
truth.w = l.biases[2*n];
truth.h = l.biases[2*n+1];
if(DOABS){
truth.w = l.biases[2*n]/l.w;
truth.h = l.biases[2*n+1]/l.h;
}
delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
}
}
}
}
/* Pass 2: for each ground-truth box, pick the best-matching anchor in its
 * cell (by shape IoU) and train that prediction's coords/obj/class. */
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
int class_id = state.truth[t * l.truth_size + b*l.truths + 4];
if (class_id >= l.classes) {
printf("\n Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
getchar();
continue; // if label contains class_id more than number of classes in the cfg-file
}
if(!truth.x) break; // continue;
float best_iou = 0;
int best_index = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
//printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
/* Compare shapes only: both boxes moved to the origin. */
box truth_shift = truth;
truth_shift.x = 0;
truth_shift.y = 0;
//printf("index %d %d\n",i, j);
for(n = 0; n < l.n; ++n){
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
if(l.bias_match){
pred.w = l.biases[2*n];
pred.h = l.biases[2*n+1];
if(DOABS){
pred.w = l.biases[2*n]/l.w;
pred.h = l.biases[2*n+1]/l.h;
}
}
//printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
pred.x = 0;
pred.y = 0;
float iou = box_iou(pred, truth_shift);
if (iou > best_iou){
best_index = index;
best_iou = iou;
best_n = n;
}
}
//printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);
float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
if(iou > .5) recall += 1;
avg_iou += iou;
//l.delta[best_index + 4] = iou - l.output[best_index + 4];
avg_obj += l.output[best_index + 4];
l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
if (l.rescore) {
/* Rescore: objectness target is the achieved IoU instead of 1. */
l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
}
if (l.map) class_id = l.map[class_id];
delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++count;
++class_count;
}
}
//printf("\n");
#ifndef GPU
flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
/* Cost is the squared L2 norm of the full delta buffer. */
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
/* NOTE(review): count/class_count can be 0 when a batch has no truths,
 * making these averages NaN/inf — diagnostic only, but confirm. */
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
// Backward pass for the region layer: the per-output deltas were already
// computed during the forward pass, so here we only accumulate them into the
// upstream gradient buffer (state.delta += 1 * l.delta over the whole batch).
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}
// Decode raw region-layer activations into boxes and per-class probabilities.
//   w, h            target width/height the (relative) boxes are scaled to
//   thresh          probabilities at or below this are zeroed
//   probs           [l.w*l.h*l.n][classes] output probability table
//   boxes           l.w*l.h*l.n decoded boxes
//   only_objectness if nonzero, probs[index][0] is overwritten with objectness
//   map             optional 200-entry class remap for softmax-tree models
// NOTE(review): the softmax-tree branch writes back into l.output
// (zeroing low-confidence scores), so this is not a read-only pass.
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
    // Each grid cell is decoded independently, so the loop parallelizes cleanly.
    #pragma omp parallel for
    for (i = 0; i < l.w*l.h; ++i){
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            // Per-prediction layout: [x, y, w, h, objectness, class scores...]
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            // classfix == -1: suppress low-objectness predictions entirely.
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            // Boxes come out relative (0..1); scale to the requested size.
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;
            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if(map){
                    // Remap the hierarchy outputs onto 200 evaluation classes.
                    for(j = 0; j < 200; ++j){
                        float prob = scale*predictions[class_index+map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                } else {
                    // Scan classes from last to first; once the first score
                    // above .5 is found, keep it and zero all earlier ones.
                    for(j = l.classes - 1; j >= 0; --j){
                        if(!found && predictions[class_index + j] > .5){
                            found = 1;
                        } else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index+j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            } else {
                // Flat softmax: class probability = objectness * class score.
                for(j = 0; j < l.classes; ++j){
                    float prob = scale*predictions[class_index+j];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                }
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
}
#ifdef GPU
// GPU forward pass for the region layer. The heavy activations (flatten +
// softmax) run on the device; the loss/delta computation is delegated to the
// CPU implementation by pulling activations (and ground truth, if present)
// to host memory, running forward_region_layer, and pushing the deltas back.
//
// Fix: the original leaked truth_cpu whenever state.train was false, because
// the early return preceded the free. Host buffers are now released before
// the early return.
void forward_region_layer_gpu(const region_layer l, network_state state)
{
    /*
       if(!state.train){
       copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
       return;
       }
     */
    // Reorder the raw input so each prediction's fields are contiguous.
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if(l.softmax_tree){
        // Softmax each sibling group of the class hierarchy independently.
        int i;
        int count = 5;
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }else if (l.softmax){
        // Plain softmax over the class scores (offset 5 skips box+objectness).
        softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }

    float* in_cpu = (float*)xcalloc(l.batch * l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = (float*)xcalloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
    //cudaStreamSynchronize(get_cuda_stream());
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_region_layer(l, cpu_state);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);

    // Release the host-side buffers before any early return; the original
    // code leaked truth_cpu on the !state.train path.
    free(cpu_state.input);
    if(cpu_state.truth) free(cpu_state.truth);

    if(!state.train) return;
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    //cudaStreamSynchronize(get_cuda_stream());
}
// GPU backward pass: un-flatten the layer's deltas (inverse of the forward
// reordering, forward flag = 0) directly into the upstream gradient buffer.
void backward_region_layer_gpu(region_layer l, network_state state)
{
    flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif
// Map detection boxes from letterboxed network coordinates back onto the
// original image. w,h is the source image size, netw,neth the network input
// size; when relative is 0 the boxes are also converted to absolute pixels.
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    // Recover the area the image actually occupied inside the net input
    // (aspect ratio is preserved, so one dimension fills the input exactly).
    int embed_w, embed_h;
    if (((float)netw / w) < ((float)neth / h)) {
        embed_w = netw;
        embed_h = (h * netw) / w;
    } else {
        embed_h = neth;
        embed_w = (w * neth) / h;
    }

    int k;
    for (k = 0; k < n; ++k) {
        box corrected = dets[k].bbox;
        // Undo the centering offset, then rescale from the embedded area.
        corrected.x = (corrected.x - (netw - embed_w) / 2. / netw) / ((float)embed_w / netw);
        corrected.y = (corrected.y - (neth - embed_h) / 2. / neth) / ((float)embed_h / neth);
        corrected.w *= (float)netw / embed_w;
        corrected.h *= (float)neth / embed_h;
        if (!relative) {
            // Convert fractional coordinates to pixels of the source image.
            corrected.x *= w;
            corrected.w *= w;
            corrected.y *= h;
            corrected.h *= h;
        }
        dets[k].bbox = corrected;
    }
}
// Decode region-layer output into an array of `detection` structs.
//   w,h           source image size; netw,neth network input size
//   map           optional 200-entry class remap for softmax-tree models
//   tree_thresh   hierarchy cutoff used by hierarchy_top_prediction
// The final correct_region_boxes call maps boxes back to image coordinates.
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i, j, n, z;
    float *predictions = l.output;
    // batch == 2: the second batch entry holds predictions for a horizontally
    // flipped copy of the image. Un-flip them in place and average the two
    // copies (test-time flip augmentation).
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w / 2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for (z = 0; z < l.classes + l.coords + 1; ++z) {
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if (z == 0) {
                            // Channel 0 is the x offset: mirror its sign.
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (l.output[i] + flip[i]) / 2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            int index = n*l.w*l.h + i;
            for (j = 0; j < l.classes; ++j) {
                dets[index].prob[j] = 0;
            }
            // entry_index addresses the planar (channel-major) output layout.
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            // With a background class the objectness factor is implicit (1).
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if (dets[index].mask) {
                // Coordinates beyond the 4 box fields are mask coefficients.
                for (j = 0; j < l.coords - 4; ++j) {
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if (l.softmax_tree) {
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
                if (map) {
                    // Remap hierarchy outputs onto 200 evaluation classes.
                    // NOTE(review): the inner class_index shadows the outer one.
                    for (j = 0; j < 200; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    // NOTE(review): this `j` shadows the outer loop variable;
                    // harmless here but easy to misread.
                    int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
                    dets[index].prob[j] = (scale > thresh) ? scale : 0;
                }
            }
            else {
                // Flat classes: only fill probabilities for kept detections.
                if (dets[index].objectness) {
                    for (j = 0; j < l.classes; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
// Clear the objectness score of every prediction in the layer's output.
void zero_objectness(layer l)
{
    int cell, anchor;
    for (cell = 0; cell < l.w*l.h; ++cell) {
        for (anchor = 0; anchor < l.n; ++anchor) {
            int obj_index = entry_index(l, 0, anchor*l.w*l.h + cell, l.coords);
            l.output[obj_index] = 0;
        }
    }
}
|
trmv_x_sky_n_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
 * y := alpha * A * x + beta * y for an upper-triangular matrix A stored in
 * skyline (SKY) format: A->values[pointers[c] .. pointers[c+1]-1] holds
 * column c, and the last stored entry of each column lies on the diagonal
 * (row c), with preceding entries on the rows directly above it.
 *
 * Improvement over the original: the column length (col_eles) was recomputed
 * on every inner iteration and tracked with an extra col_indx counter; the
 * target row is now derived directly from the value index
 * (r = c - (col_end - 1 - ai), algebraically identical).
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    // Skyline triangular kernels require a square matrix.
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();
    // Scale y by beta first; rows are independent so this parallelizes.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
    }

    // Accumulate alpha * A * x column by column. This stays serial: different
    // columns scatter into overlapping rows of y.
    for(ALPHA_INT c = 0; c < n; ++c)
    {
        const ALPHA_INT col_start = A->pointers[c];
        const ALPHA_INT col_end = A->pointers[c + 1];
        for(ALPHA_INT ai = col_start; ai < col_end; ++ai)
        {
            // Last entry (ai == col_end - 1) is the diagonal, row c.
            const ALPHA_INT r = c - (col_end - 1 - ai);
            ALPHA_Number t;
            alpha_mul(t, alpha, A->values[ai]);
            alpha_madde(y[r], t, x[c]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/**
 * Public entry point for the SKY upper-triangular mv kernel: computes
 * y := alpha * A * x + beta * y by dispatching to the OpenMP implementation.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
|
DRB113-default-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
default(none) to enforce explictly list all variables in data-sharing attribute clauses
default(shared) to cover another option.
*/
#include <omp.h>
#include <stdio.h>
int a[100][100];
int b[100][100];
// DataRaceBench DRB113 (race-free): every nested parallel region writes a
// distinct a[i][j] / b[i][j] element, so there is no data race.
// Fix: printf was used without a declaration (no <stdio.h> include), which is
// an implicit function declaration — invalid since C99. The include block now
// provides <stdio.h>.
int main()
{
    int i;
    int j;
    // Initialize both arrays to the row index.
#pragma omp parallel for private (i,j)
    for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
        for (j = 0; j <= 99; j += 1) {
            a[i][j] = i;
            b[i][j] = i;
        }
    }
    // Increment every element of a.
#pragma omp parallel for private (i,j)
    for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
        for (j = 0; j <= 99; j += 1) {
            a[i][j] = a[i][j] + 1;
        }
    }
    // Increment every element of b.
#pragma omp parallel for private (i,j)
    for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
        for (j = 0; j <= 99; j += 1) {
            b[i][j] = b[i][j] + 1;
        }
    }
    // Serial dump of the results.
    for (i = 0; i <= 99; i += 1) {
        for (j = 0; j <= 99; j += 1) {
            printf("%d %d\n",a[i][j],b[i][j]);
        }
    }
    return 0;
}
|
GB_unop__identity_fc64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_bool)
// op(A') function: GB (_unop_tran__identity_fc64_bool)
// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op entrywise, casting bool -> GxB_FC64_t:
// Cx [p] = GxB_CMPLX ((double) Ax [p], 0). Auto-generated; logic unchanged.
GrB_Info GB (_unop_apply__identity_fc64_bool)
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // Ab == NULL: not bitmap, so all anz entries are present.
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only entries with Ab [p] set are converted.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast bool -> GxB_FC64_t.
// The loop body is generated by including GB_unop_transpose.c, which expands
// the GB_* macros defined above. Auto-generated; logic unchanged.
GrB_Info GB (_unop_tran__identity_fc64_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
tool_available_search.c | // RUN: %clang %flags -shared -fPIC %s -o %T/first_tool.so
// RUN: %clang %flags -DTOOL -DSECOND_TOOL -shared -fPIC %s -o %T/second_tool.so
// RUN: %clang %flags -DTOOL -DTHIRD_TOOL -shared -fPIC %s -o %T/third_tool.so
// RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/non_existing_file.so:%T/first_tool.so:%T/second_tool.so:%T/third_tool.so %libomp-run | FileCheck %s
// REQUIRES: ompt
/*
* This file contains code for three OMPT shared library tool to be
* loaded and the code for the OpenMP executable.
* No option enables code for the first shared library
* (without an implementation of ompt_start_tool) during compilation
* -DTOOL -DSECOND_TOOL enables the code for the second tool during compilation
* -DTOOL -DTHIRD_TOOL enables the code for the third tool during compilation
* -DCODE enables the code for the executable during compilation
*/
#ifdef CODE
#include "stdio.h"
#include "omp.h"
#include "ompt.h"
// Driver for the OMPT tool-search test: the master thread of a two-thread
// parallel region calls omp_control_tool; the FileCheck directives below
// verify which of the candidate libraries in OMP_TOOL_LIBRARIES was chosen.
int main()
{
#pragma omp parallel num_threads(2)
    {
#pragma omp master
        {
            int result = omp_control_tool(omp_control_tool_start, 0, NULL);
            printf("0: control_tool()=%d\n", result);
        }
    }

    // Check if libomp supports the callbacks for this test.
    // CHECK-NOT: {{^}}0: Could not register callback

    // CHECK: {{^}}0: Do not initialize tool

    // CHECK: {{^}}0: Do initialize tool
    // CHECK: {{^}}0: Tool initialized
    // CHECK: {{^}}0: ompt_event_thread_begin
    // CHECK-DAG: {{^}}0: ompt_event_thread_begin
    // CHECK-DAG: {{^}}0: control_tool()=-1
    // CHECK: {{^}}0: Tool finalized

    return 0;
}
#endif /* CODE */
#ifdef TOOL
#include <ompt.h>
#include "stdio.h"
#ifdef SECOND_TOOL
// The second tool has an implementation of ompt_start_tool that returns NULL
// The second tool has an implementation of ompt_start_tool that returns NULL,
// i.e. it declines to become the active tool, so the runtime must continue
// searching the remaining entries of OMP_TOOL_LIBRARIES.
ompt_start_tool_result_t* ompt_start_tool(
    unsigned int omp_version,
    const char *runtime_version)
{
    printf("0: Do not initialize tool\n");
    return NULL;
}
#elif defined(THIRD_TOOL)
// The third tool has an implementation of ompt_start_tool that returns a
// pointer to a valid instance of ompt_start_tool_result_t
// Thread-begin callback registered by the third tool; prints the marker line
// that the CHECK directives in the CODE section match against.
static void
on_ompt_callback_thread_begin(
    ompt_thread_type_t thread_type,
    ompt_data_t *thread_data)
{
    printf("0: ompt_event_thread_begin\n");
}
// Tool initializer: obtains ompt_set_callback through the runtime's lookup
// function, registers the thread-begin callback, and returns 1 to signal
// successful initialization.
int ompt_initialize(
    ompt_function_lookup_t lookup,
    ompt_data_t *tool_data)
{
    ompt_set_callback_t ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
    ompt_set_callback(ompt_callback_thread_begin, (ompt_callback_t)on_ompt_callback_thread_begin);
    printf("0: Tool initialized\n");
    return 1;
}
// Tool finalizer, invoked by the runtime at shutdown.
void ompt_finalize(ompt_data_t *tool_data)
{
    printf("0: Tool finalized\n");
}
// The third tool has an implementation of ompt_start_tool that returns a
// pointer to a valid instance of ompt_start_tool_result_t, so the runtime
// stops searching and activates this tool.
ompt_start_tool_result_t* ompt_start_tool(
    unsigned int omp_version,
    const char *runtime_version)
{
    printf("0: Do initialize tool\n");
    // static: the runtime keeps this pointer, so it must outlive the call.
    static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
    return &ompt_start_tool_result;
}
#endif
#endif /* TOOL */
|
residualbased_elimination_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "utilities/builtin_timer.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard elimination builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
/// Definition of the classes from the base class
typedef typename BaseType::SizeType SizeType;
typedef typename BaseType::IndexType IndexType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
/// Definition of the equation id vector
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
/// Node definition
typedef Node<3> NodeType;
/// Containers definition
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedEliminationBuilderAndSolver() : BaseType()
{
    // Nothing to initialize here beyond the base class defaults.
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedEliminationBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Merge the user-provided parameters with the class defaults, then
    // apply the resulting settings to this instance.
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);
}
/**
* @brief Constructor.
*/
explicit ResidualBasedEliminationBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver)
{
    // Solver-only construction: all settings keep their defaults.
}
/** Destructor.
*/
~ResidualBasedEliminationBuilderAndSolver() override
{
    // Nothing to release explicitly.
}
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    // Factory method: returns a new, independently configured instance.
    return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Getting the elements from the model
    ElementsArrayType& r_elements_array = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

    // Counts used as OpenMP loop bounds below
    const int nelements = static_cast<int>(r_elements_array.size());
    const int nconditions = static_cast<int>(r_conditions_array.size());

    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = r_elements_array.begin();
    const auto it_cond_begin = r_conditions_array.begin();

    // Contributions to the system; firstprivate below gives each thread its
    // own copy, so entity routines may resize them freely.
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    EquationIdVectorType equation_id;

    // Assemble all elements
    const auto timer = BuiltinTimer();

    #pragma omp parallel firstprivate(LHS_Contribution, RHS_Contribution, equation_id )
    {
        // nowait: threads may start on conditions while others finish elements
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);

            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateSystemContributions(*it_elem, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
                Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
            }
        }

        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);

            if (condition_is_active) {
                // Calculate the condition contribution
                pScheme->CalculateSystemContributions(*it_cond, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);

                // Assemble the condition contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
                Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
            }
        }
    }

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System build time: " << timer.ElapsedSeconds() << std::endl;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished building" << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
*/
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Getting the elements from the model
    ElementsArrayType& r_elements_array = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

    // Counts used as OpenMP loop bounds below
    const int nelements = static_cast<int>(r_elements_array.size());
    const int nconditions = static_cast<int>(r_conditions_array.size());

    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = r_elements_array.begin();
    const auto it_cond_begin = r_conditions_array.begin();

    // Resetting to zero the vector of reactions
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // Contributions to the system; firstprivate gives each thread its own copy
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

    // Vector containing the localization in the system of the different terms
    EquationIdVectorType equation_id;

    #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
    {
        // nowait: threads may start on conditions while others finish elements
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);

            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
                AssembleLHS(rA, LHS_Contribution, equation_id);
            }
        }

        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);

            if (condition_is_active) {
                // Calculate the condition contribution
                pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);

                // Assemble the condition contribution
                AssembleLHS(rA, LHS_Contribution, equation_id);
            }
        }
    }

    KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Getting the elements from the model
    ElementsArrayType& r_elements_array = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

    // Counts used as OpenMP loop bounds below
    const int nelements = static_cast<int>(r_elements_array.size());
    const int nconditions = static_cast<int>(r_conditions_array.size());

    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = r_elements_array.begin();
    const auto it_cond_begin = r_conditions_array.begin();

    // Resetting to zero the vector of reactions
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // Contributions to the system; firstprivate gives each thread its own copy
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

    // Vector containing the localization in the system of the different terms
    EquationIdVectorType equation_id;

    #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
    {
        // nowait: threads may start on conditions while others finish elements
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);

            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution (free rows only, full columns)
                AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
            }
        }

        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);

            if (condition_is_active) {
                // Calculate the condition contribution
                pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);

                // Assemble the condition contribution (free rows only, full columns)
                AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
            }
        }
    }

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
/**
 * @brief Solves rA * rDx = rb with the attached linear solver.
 * @details When the RHS has zero norm (or zero size) the solve is skipped
 * and the solution increment is simply zeroed.
 */
void SystemSolve(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    // A zero RHS makes the solve trivial: the update is the zero vector.
    const double norm_b = (TSparseSpace::Size(rb) != 0) ? TSparseSpace::TwoNorm(rb) : 0.0;

    if (norm_b == 0.0) {
        TSparseSpace::SetToZero(rDx);
    } else {
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
    }

    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(rb) != 0) {
norm_b = TSparseSpace::TwoNorm(rb);
} else {
norm_b = 0.0;
}
if (norm_b != 0.0) {
// Provide physical data as needed
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, BaseType::mDofSet, rModelPart);
// Do solve
BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx);
KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safest function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
Timer::Start("Build");
Build(pScheme, rModelPart, rA, rb);
Timer::Stop("Build");
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
KRATOS_CATCH("")
}
/**
     * @brief Corresponds to the previous function, but the system's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
    void BuildRHSAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY
        // Rebuild only the RHS: the LHS matrix rA is assumed already assembled
        BuildRHS(pScheme, rModelPart, rb);
        // Solve with the existing matrix and the freshly built RHS
        SystemSolve(rA, rDx, rb);
        KRATOS_CATCH("")
    }
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY
        // Resetting to zero the vector of reactions
        if(BaseType::mCalculateReactionsFlag) {
            TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
        }
        // Getting the Elements
        ElementsArrayType& r_elements_array = rModelPart.Elements();
        // Getting the array of the conditions
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // Contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        // Vector containing the localization in the system of the different terms
        EquationIdVectorType equation_id;
        // Assemble all elements.
        // Each thread gets private copies of the scratch containers (firstprivate);
        // concurrent writes into rb are made safe inside AssembleRHS.
        #pragma omp parallel firstprivate( RHS_Contribution, equation_id)
        {
            const auto it_elem_begin = r_elements_array.begin();
            const int nelements = static_cast<int>(r_elements_array.size());
            // nowait: threads may proceed to the condition loop without a barrier here
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < nelements; ++i) {
                auto it_elem = it_elem_begin + i;
                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if (it_elem->IsDefined(ACTIVE))
                    element_is_active = it_elem->Is(ACTIVE);
                if (element_is_active) {
                    // Calculate elemental Right Hand Side Contribution
                    pScheme->CalculateRHSContribution(*it_elem, RHS_Contribution, equation_id, r_current_process_info);
                    // Assemble the elemental contribution
                    AssembleRHS(rb, RHS_Contribution, equation_id);
                }
            }
            // Assemble all conditions
            const auto it_cond_begin = r_conditions_array.begin();
            const int nconditions = static_cast<int>(r_conditions_array.size());
            #pragma omp for schedule(guided, 512)
            for (int i = 0; i < nconditions; ++i) {
                auto it_cond = it_cond_begin + i;
                // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if (it_cond->IsDefined(ACTIVE))
                    condition_is_active = it_cond->Is(ACTIVE);
                if (condition_is_active) {
                    // Calculate elemental contribution
                    pScheme->CalculateRHSContribution(*it_cond, RHS_Contribution, equation_id, r_current_process_info);
                    // Assemble the elemental contribution
                    AssembleRHS(rb, RHS_Contribution, equation_id);
                }
            }
        }
        KRATOS_CATCH("")
    }
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        ) override
    {
        KRATOS_TRY;
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
        // Gets the array of elements from the modeler
        ElementsArrayType& r_elements_array = rModelPart.Elements();
        const int nelements = static_cast<int>(r_elements_array.size());
        DofsVectorType elemental_dof_list;
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        SizeType nthreads = ParallelUtilities::GetNumThreads();
        // One unordered set of dof pointers per thread: each thread collects
        // dofs without synchronization, duplicates are removed by the set itself
        typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
        std::vector<set_type> dofs_aux_list(nthreads);
        for (int i = 0; i < static_cast<int>(nthreads); ++i) {
            dofs_aux_list[i].reserve(nelements);
        }
        #pragma omp parallel for firstprivate(nelements, elemental_dof_list)
        for (int i = 0; i < static_cast<int>(nelements); ++i) {
            auto it_elem = r_elements_array.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();
            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, elemental_dof_list, r_current_process_info);
            dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end());
        }
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        const int nconditions = static_cast<int>(r_conditions_array.size());
        #pragma omp parallel for firstprivate(nconditions, elemental_dof_list)
        for (int i = 0; i < nconditions; ++i) {
            auto it_cond = r_conditions_array.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();
            // Gets list of Dof involved on every condition
            pScheme->GetDofList(*it_cond, elemental_dof_list, r_current_process_info);
            dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end());
        }
        // Here we do a reduction in a tree so to have everything on thread 0:
        // on each pass, set i absorbs set i + new_max, halving the live sets
        SizeType old_max = nthreads;
        SizeType new_max = ceil(0.5*static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max) {
            #pragma omp parallel for
            for (int i = 0; i < static_cast<int>(new_max); ++i) {
                if (i + new_max < old_max) {
                    dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                    dofs_aux_list[i + new_max].clear();
                }
            }
            old_max = new_max;
            new_max = ceil(0.5*static_cast<double>(old_max));
        }
        // Copy the merged (unique) dofs into a sortable array and store it
        DofsArrayType dof_temp;
        BaseType::mDofSet = DofsArrayType();
        dof_temp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); ++it) {
            dof_temp.push_back(*it);
        }
        dof_temp.Sort();
        BaseType::mDofSet = dof_temp;
        // Throws an execption if there are no Degrees of freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
        BaseType::mDofSetIsInitialized = true;
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef USE_LOCKS_IN_ASSEMBLY
        // Recreate one OpenMP lock per dof (used by Assemble when lock-based
        // assembly is compiled in); destroy any locks left from a previous setup
        if (mLockArray.size() != 0) {
            for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
                omp_destroy_lock(&mLockArray[i]);
        }
        mLockArray.resize(BaseType::mDofSet.size());
        for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
            omp_init_lock(&mLockArray[i]);
#endif
        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag()) {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id() << std::endl
                    << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif
        KRATOS_CATCH("");
    }
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(ModelPart& rModelPart) override
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed one are at the end (in opposite order).
//
// that means that if the EquationId is greater than "mEquationSystemSize"
// the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
/**
     * @brief This method resizes and initializes the system of equations
* @param pA The pointer to the LHS matrix
* @param pDx The pointer to the vector of Unknowns
* @param pb The pointer to the RHS vector
* @param rModelPart The model part to be computed
*/
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if (pA == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& rA = *pA;
TSystemVectorType& rDx = *pDx;
TSystemVectorType& rb = *pb;
// Resizing the system vectors and matrix
if (rA.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized
rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, rA, rModelPart);
} else {
if (rA.size1() != BaseType::mEquationSystemSize || rA.size2() != BaseType::mEquationSystemSize) {
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, rA, rModelPart);
}
}
if (rDx.size() != BaseType::mEquationSystemSize) {
rDx.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(rDx);
if (rb.size() != BaseType::mEquationSystemSize) {
rb.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(rb);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true) {
const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
if (BaseType::mpReactionsVector->size() != reactions_vector_size)
BaseType::mpReactionsVector->resize(reactions_vector_size, false);
}
KRATOS_CATCH("")
}
/**
* @brief This method computes the reactions
* @param pScheme The integration scheme considered
* @param rModelPart The model part considered
* @param rA The LHS of the system
* @param rDx The vector of Unknowns
* @param rb The RHS vector
*/
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
//refresh RHS to have the correct reactions
BuildRHS(pScheme, rModelPart, rb);
// Updating variables
std::size_t i;
TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
for (auto it2 = BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2) {
i = (*it2)->EquationId();
if (i >= BaseType::mEquationSystemSize) {
i -= BaseType::mEquationSystemSize;
(*it2)->GetSolutionStepReactionValue() = -r_reactions_vector[i];
}
}
}
/**
     * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
     * inexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details For explanation of how it works for a particular implementation the user
     * should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        // Intentionally empty: in the elimination approach the fixed dofs are
        // never assembled into the system, so no post-processing is required here
    }
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
    void Clear() override
    {
        // Release the stored dof set and the reactions vector, then let the
        // linear solver drop any internal data it may be holding
        this->mDofSet = DofsArrayType();
        this->mpReactionsVector.reset();
        // this->mReactionsVector = TSystemVectorType();
        this->mpLinearSystemSolver->Clear();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
    int Check(ModelPart& rModelPart) override
    {
        KRATOS_TRY
        // No builder-and-solver specific checks are implemented: always reports success
        return 0;
        KRATOS_CATCH("");
    }
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "elimination_builder_and_solver"
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
    static std::string Name()
    {
        // Identifier used to select this builder and solver from settings files
        return "elimination_builder_and_solver";
    }
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
    std::string Info() const override
    {
        // Class name, used by the Print* helpers below
        return "ResidualBasedEliminationBuilderAndSolver";
    }
    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }
    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef USE_LOCKS_IN_ASSEMBLY
std::vector<omp_lock_t> mLockArray;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assembles the system
* @param rA The LHS of the system
* @param rb The RHS of the system
* @param rLHSContribution The LHS local contribution
* @param rRHSContribution The RHS local contribution
* @param rEquationId The equation id
* @param rLockArray The lock of the dof
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
*/
    void Assemble(
        TSystemMatrixType& rA,
        TSystemVectorType& rb,
        const LocalSystemMatrixType& rLHSContribution,
        const LocalSystemVectorType& rRHSContribution,
        const Element::EquationIdVectorType& rEquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
        ,std::vector< omp_lock_t >& rLockArray
#endif
        )
    {
        const SizeType local_size = rLHSContribution.size1();
        for (IndexType i_local = 0; i_local < local_size; ++i_local) {
            const IndexType i_global = rEquationId[i_local];
            // Only free dofs (equation id below the system size) are assembled
            if (i_global < BaseType::mEquationSystemSize) {
#ifdef USE_LOCKS_IN_ASSEMBLY
                // Lock-based variant: one per-row lock guards both RHS and LHS writes
                omp_set_lock(&rLockArray[i_global]);
                rb[i_global] += rRHSContribution(i_local);
#else
                // Atomic variant: RHS entry updated with a single atomic add;
                // LHS row entries are protected by atomics inside the call below
                double& r_a = rb[i_global];
                const double& v_a = rRHSContribution(i_local);
                #pragma omp atomic
                r_a += v_a;
#endif
                AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
                omp_unset_lock(&rLockArray[i_global]);
#endif
            }
            //note that computation of reactions is not performed here!
        }
    }
/**
     * @brief This method constructs the relationship between the DoF
* @param pScheme The integration scheme
* @param rA The LHS of the system
* @param rModelPart The model part which defines the problem
*/
    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");
        const SizeType equation_size = BaseType::mEquationSystemSize;
        // One set of column indices per free row of the system
        std::vector<std::unordered_set<IndexType> > indices(equation_size);
        #pragma omp parallel for firstprivate(equation_size)
        for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
            indices[iii].reserve(40);
        }
        Element::EquationIdVectorType ids(3, 0);
        #pragma omp parallel firstprivate(ids)
        {
            // The process info
            const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
            // We repeat the same declaration for each thead: thread-local index
            // sets avoid synchronization during the entity loops below
            std::vector<std::unordered_set<IndexType> > temp_indexes(equation_size);
            #pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
                temp_indexes[index].reserve(30);
            // Getting the size of the array of elements from the model
            const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
            // Element initial iterator
            const auto it_elem_begin = rModelPart.ElementsBegin();
            // We iterate over the elements: every pair of free dofs of an
            // element couples, so all free (row, column) id pairs are recorded
            #pragma omp for schedule(guided, 512) nowait
            for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
                auto it_elem = it_elem_begin + i_elem;
                pScheme->EquationId( *it_elem, ids, r_current_process_info);
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }
            // Getting the size of the array of the conditions
            const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
            // Condition initial iterator
            const auto it_cond_begin = rModelPart.ConditionsBegin();
            // We iterate over the conditions
            #pragma omp for schedule(guided, 512) nowait
            for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
                auto it_cond = it_cond_begin + i_cond;
                pScheme->EquationId( *it_cond, ids, r_current_process_info);
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }
            // Merging all the temporal indexes (one thread at a time)
            #pragma omp critical
            {
                for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                    indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
                }
            }
        }
        // Count the row sizes (total number of nonzeros of the sparse matrix)
        SizeType nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();
        rA = TSystemMatrixType(indices.size(), indices.size(), nnz);
        // Raw CSR arrays of the matrix: values, row pointers, column indices
        double* Avalues = rA.value_data().begin();
        std::size_t* Arow_indices = rA.index1_data().begin();
        std::size_t* Acol_indices = rA.index2_data().begin();
        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        // (each entry depends on the previous one: it is a prefix sum)
        Arow_indices[0] = 0;
        for (IndexType i = 0; i < rA.size1(); ++i)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rA.size1()); ++i) {
            const IndexType row_begin = Arow_indices[i];
            const IndexType row_end = Arow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                ++k;
            }
            // CSR requires sorted column indices within each row
            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        }
        rA.set_filled(indices.size() + 1, nnz);
        Timer::Stop("MatrixStructure");
    }
/**
* @brief This method assembles the LHS of the system
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHS(
TSystemMatrixType& rA,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId
)
{
const SizeType local_size = rLHSContribution.size1();
for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) {
for (IndexType j_local = 0; j_local < local_size; ++j_local) {
const IndexType j_global = rEquationId[j_local];
if (j_global < BaseType::mEquationSystemSize) {
rA(i_global, j_global) += rLHSContribution(i_local, j_local);
}
}
}
}
}
/**
* @brief This function is equivalent to the AssembleRowContribution of the block builder and solver
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped
*/
    inline void AssembleRowContributionFreeDofs(
        TSystemMatrixType& rA,
        const Matrix& rALocal,
        const IndexType i,
        const IndexType i_local,
        const Element::EquationIdVectorType& EquationId
        )
    {
        // Raw CSR arrays of the global matrix
        double* values_vector = rA.value_data().begin();
        IndexType* index1_vector = rA.index1_data().begin();
        IndexType* index2_vector = rA.index2_data().begin();
        // First nonzero position of row i
        const IndexType left_limit = index1_vector[i];
        // Find the first entry
        // We iterate over the equation ids until we find the first equation id to be considered
        // We count in which component we find an ID
        IndexType last_pos = 0;
        IndexType last_found = 0;
        IndexType counter = 0;
        for(IndexType j=0; j < EquationId.size(); ++j) {
            ++counter;
            const IndexType j_global = EquationId[j];
            if (j_global < BaseType::mEquationSystemSize) {
                last_pos = ForwardFind(j_global,left_limit,index2_vector);
                last_found = j_global;
                break;
            }
        }
        // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered
        // NOTE(review): since this is only called for a free row i, EquationId
        // contains at least one free id, so the break above always fires and
        // last_pos/last_found are initialized — confirm against callers
        if (counter <= EquationId.size()) {
#ifndef USE_LOCKS_IN_ASSEMBLY
            // Atomic add of the first free column's contribution
            double& r_a = values_vector[last_pos];
            const double& v_a = rALocal(i_local,counter - 1);
            #pragma omp atomic
            r_a += v_a;
#else
            // Caller holds the per-row lock, a plain add is safe here
            values_vector[last_pos] += rALocal(i_local,counter - 1);
#endif
            // Now find all of the other entries: search forwards or backwards
            // from the last position found, exploiting sorted column indices
            IndexType pos = 0;
            for(IndexType j = counter; j < EquationId.size(); ++j) {
                IndexType id_to_find = EquationId[j];
                if (id_to_find < BaseType::mEquationSystemSize) {
                    if(id_to_find > last_found)
                        pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
                    else if(id_to_find < last_found)
                        pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
                    else
                        pos = last_pos;
#ifndef USE_LOCKS_IN_ASSEMBLY
                    double& r = values_vector[pos];
                    const double& v = rALocal(i_local,j);
                    #pragma omp atomic
                    r += v;
#else
                    values_vector[pos] += rALocal(i_local,j);
#endif
                    last_found = id_to_find;
                    last_pos = pos;
                }
            }
        }
    }
inline IndexType ForwardFind(const IndexType id_to_find,
const IndexType start,
const IndexType* index_vector)
{
IndexType pos = start;
while(id_to_find != index_vector[pos]) pos++;
return pos;
}
inline IndexType BackwardFind(const IndexType id_to_find,
const IndexType start,
const IndexType* index_vector)
{
IndexType pos = start;
while(id_to_find != index_vector[pos]) pos--;
return pos;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method ensures that the contribution is unique
*/
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate) {
i++;
}
if (i == endit) {
v.push_back(candidate);
}
}
/**
* @brief This method assembles the RHS of the system
* @param rb The RHS to assemble
* @param rRHSContribution The local RHS contribution
* @param rEquationId The equation id
*/
    void AssembleRHS(
        TSystemVectorType& rb,
        const LocalSystemVectorType& rRHSContribution,
        const EquationIdVectorType& rEquationId
        )
    {
        SizeType local_size = rRHSContribution.size();
        if (BaseType::mCalculateReactionsFlag == false) {
            // Reactions not requested: fixed dof contributions are simply dropped
            for (IndexType i_local = 0; i_local < local_size; ++i_local) {
                const IndexType i_global = rEquationId[i_local];
                if (i_global < BaseType::mEquationSystemSize) { // Free dof
                    // ASSEMBLING THE SYSTEM VECTOR
                    double& b_value = rb[i_global];
                    const double& rhs_value = rRHSContribution[i_local];
                    #pragma omp atomic
                    b_value += rhs_value;
                }
            }
        } else {
            // Reactions requested: fixed dof contributions accumulate into the
            // reactions vector (indexed by EquationId - mEquationSystemSize)
            TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
            for (IndexType i_local = 0; i_local < local_size; ++i_local) {
                const IndexType i_global = rEquationId[i_local];
                if (i_global < BaseType::mEquationSystemSize) { //free dof
                    // ASSEMBLING THE SYSTEM VECTOR
                    double& b_value = rb[i_global];
                    const double& rhs_value = rRHSContribution[i_local];
                    #pragma omp atomic
                    b_value += rhs_value;
                } else { // Fixed dof
                    double& b_value = r_reactions_vector[i_global - BaseType::mEquationSystemSize];
                    const double& rhs_value = rRHSContribution[i_local];
                    #pragma omp atomic
                    b_value += rhs_value;
                }
            }
        }
    }
/**
* @brief This method assembles the LHS of the system (on free rows)
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHSCompleteOnFreeRows(
TSystemMatrixType& rA,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId
)
{
const SizeType local_size = rLHSContribution.size1();
for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) {
for (IndexType j_local = 0; j_local < local_size; ++j_local) {
const IndexType j_global = rEquationId[j_local];
rA(i_global, j_global) += rLHSContribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
GB_unop__identity_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
// Cx = op (cast (Ax)): apply the identity unary operator elementwise.
// This variant is compiled out (#if 0 above): the identity apply is a no-op
// handled elsewhere, so only the transpose kernel below is generated.
GrB_Info GB (_unop_apply__(none))
(
    GxB_FC32_t *Cx, // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the identity operator.
// The actual kernel body is generated by the shared template below, which
// uses the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__identity_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type disabled at compile time: fall back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8*8, inch, outch);
const float ktm[8][3] = {
{ 1.0f, 0.0f, 0.0f},
{-2.0f/9, -2.0f/9, -2.0f/9},
{-2.0f/9, 2.0f/9, -2.0f/9},
{1.0f/90, 1.0f/45, 2.0f/45},
{1.0f/90, -1.0f/45, 2.0f/45},
{1.0f/45, 1.0f/90, 1.0f/180},
{1.0f/45, -1.0f/90, 1.0f/180},
{ 0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i=0; i<8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j=0; j<8; j++)
{
float* tmpp = &tmp[j][0];
for (int i=0; i<8; i++)
{
kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 4b-4a-inch/4a-64-outch/4b;
#if __aarch64__
kernel_tm_pack4.create(2 * inch/4, 64, (outch/4)/2 + (outch/4)%2, (size_t)4u*16, 16);
#else
kernel_tm_pack4.create(inch/4, 64, outch/4, (size_t)4u*16, 16);
#endif
int q=0;
#if __aarch64__
for (; q+7<outch; q+=8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q+1);
const Mat k2 = kernel_tm.channel(q+2);
const Mat k3 = kernel_tm.channel(q+3);
const Mat k4 = kernel_tm.channel(q+4);
const Mat k5 = kernel_tm.channel(q+5);
const Mat k6 = kernel_tm.channel(q+6);
const Mat k7 = kernel_tm.channel(q+7);
Mat g0 = kernel_tm_pack4.channel(q/8);
for (int k=0; k<64; k++)
{
float* g00 = g0.row(k);
for (int p=0; p+3<inch; p+=4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p+1);
const float* k02 = k0.row(p+2);
const float* k03 = k0.row(p+3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p+1);
const float* k12 = k1.row(p+2);
const float* k13 = k1.row(p+3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p+1);
const float* k22 = k2.row(p+2);
const float* k23 = k2.row(p+3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p+1);
const float* k32 = k3.row(p+2);
const float* k33 = k3.row(p+3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p+1);
const float* k42 = k4.row(p+2);
const float* k43 = k4.row(p+3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p+1);
const float* k52 = k5.row(p+2);
const float* k53 = k5.row(p+3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p+1);
const float* k62 = k6.row(p+2);
const float* k63 = k6.row(p+3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p+1);
const float* k72 = k7.row(p+2);
const float* k73 = k7.row(p+3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00[8] = k01[k];
g00[9] = k11[k];
g00[10] = k21[k];
g00[11] = k31[k];
g00[12] = k41[k];
g00[13] = k51[k];
g00[14] = k61[k];
g00[15] = k71[k];
g00[16] = k02[k];
g00[17] = k12[k];
g00[18] = k22[k];
g00[19] = k32[k];
g00[20] = k42[k];
g00[21] = k52[k];
g00[22] = k62[k];
g00[23] = k72[k];
g00[24] = k03[k];
g00[25] = k13[k];
g00[26] = k23[k];
g00[27] = k33[k];
g00[28] = k43[k];
g00[29] = k53[k];
g00[30] = k63[k];
g00[31] = k73[k];
g00 += 32;
}
}
}
#endif // __aarch64__
for (; q+3<outch; q+=4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q+1);
const Mat k2 = kernel_tm.channel(q+2);
const Mat k3 = kernel_tm.channel(q+3);
#if __aarch64__
Mat g0 = kernel_tm_pack4.channel(q/8+(q%8)/4);
#else
Mat g0 = kernel_tm_pack4.channel(q/4);
#endif
for (int k=0; k<64; k++)
{
float* g00 = g0.row(k);
for (int p=0; p+3<inch; p+=4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p+1);
const float* k02 = k0.row(p+2);
const float* k03 = k0.row(p+3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p+1);
const float* k12 = k1.row(p+2);
const float* k13 = k1.row(p+3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p+1);
const float* k22 = k2.row(p+2);
const float* k23 = k2.row(p+3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p+1);
const float* k32 = k3.row(p+2);
const float* k33 = k3.row(p+3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k01[k];
g00[5] = k11[k];
g00[6] = k21[k];
g00[7] = k31[k];
g00[8] = k02[k];
g00[9] = k12[k];
g00[10] = k22[k];
g00[11] = k32[k];
g00[12] = k03[k];
g00[13] = k13[k];
g00[14] = k23[k];
g00[15] = k33[k];
g00 += 16;
}
}
}
}
static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m=0; m<8; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(tmp[5][m], _tmp5m);
vst1q_f32(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm/8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
float* r0_tm_6 = r0_tm_0 + tiles * 24;
float* r0_tm_7 = r0_tm_0 + tiles * 28;
for (int m=0; m<8; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
vst1q_f32(r0_tm_6, _r0tm6);
vst1q_f32(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm/8 * w_tm/8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles/12 + (tiles%12)/8 + (tiles%12%8)/4 + (tiles%12%4)/2 + tiles%12%2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r=0; r<64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
#if __aarch64__
for (; i+11<tiles; i+=12)
{
float* tm2p = tm2.row(i/12);
const float* r0 = bottom_blob_tm;
r0 += (r*tiles + i) * 4;
for (int q=0; q<inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i+7<tiles; i+=8)
{
#if __aarch64__
float* tm2p = tm2.row(i/12 + (i%12)/8);
#else
float* tm2p = tm2.row(i/8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r*tiles + i) * 4;
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
);
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
);
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i+3<tiles; i+=4)
{
#if __aarch64__
float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4);
#else
float* tm2p = tm2.row(i/8 + (i%8)/4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r*tiles + i) * 4;
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3"
);
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3"
);
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i+1<tiles; i+=2)
{
#if __aarch64__
float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
#else
float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r*tiles + i) * 4;
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1"
);
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i<tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
#else
float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r*tiles + i) * 4;
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0"
);
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0"
);
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i=0;
for (; i+11<tiles; i+=12)
{
const float* r0 = bb2.row(i/12);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+7<tiles; i+=8)
{
const float* r0 = bb2.row(i/12 + (i%12)/8);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r4 r5 r6 r7
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<tiles; i+=4)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i+1<tiles; i+=2)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
}
for (; i<tiles; i++)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"// r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"
);
}
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p/2+p%2);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i=0;
#if __aarch64__
for (; i+11<tiles; i+=12)
{
const float* r0 = bb2.row(i/12);
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif
for (; i+7<tiles; i+=8)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8);
#else
const float* r0 = bb2.row(i/8);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; i+3<tiles; i+=4)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif
}
for (; i+1<tiles; i+=2)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"
);
#endif
}
for (; i<tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"// r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"
);
#else
asm volatile(
"veor q8, q8 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float32x4_t _bias0 = bias ? vld1q_f32( (const float*)bias + p * 4) : vdupq_n_f32(0.f);
float tmp[6][8][4];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm/8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
const float* output0_tm_6 = output0_tm_0 + tiles * 24;
const float* output0_tm_7 = output0_tm_0 + tiles * 28;
float* output0 = out0.row(i * 6) + (j * 6) * 4;
// TODO neon optimize
for (int m=0; m<8; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m=0; m<6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 16, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 12, _out03);
vst1q_f32(output0 + 20, _out05);
// output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"// r04 r05 r06 r07
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v28.4s}, [%1] \n"// r08
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"// r14 r15 r16 r17
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v10.s[0] \n"
"fmla v22.4s, v24.4s, v12.s[0] \n"
"fmla v23.4s, v24.4s, v14.s[0] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v10.s[1] \n"
"fmla v22.4s, v25.4s, v12.s[1] \n"
"fmla v23.4s, v25.4s, v14.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v10.s[2] \n"
"fmla v22.4s, v26.4s, v12.s[2] \n"
"fmla v23.4s, v26.4s, v14.s[2] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v10.s[3] \n"
"fmla v22.4s, v27.4s, v12.s[3] \n"
"fmla v23.4s, v27.4s, v14.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v28.4s}, [%2] \n"// r18
"fmla v20.4s, v16.4s, v9.s[0] \n"
"fmla v21.4s, v16.4s, v11.s[0] \n"
"fmla v22.4s, v16.4s, v13.s[0] \n"
"fmla v23.4s, v16.4s, v15.s[0] \n"
"fmla v20.4s, v17.4s, v9.s[1] \n"
"fmla v21.4s, v17.4s, v11.s[1] \n"
"fmla v22.4s, v17.4s, v13.s[1] \n"
"fmla v23.4s, v17.4s, v15.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v9.s[2] \n"
"fmla v21.4s, v18.4s, v11.s[2] \n"
"fmla v22.4s, v18.4s, v13.s[2] \n"
"fmla v23.4s, v18.4s, v15.s[2] \n"
"fmla v20.4s, v19.4s, v9.s[3] \n"
"fmla v21.4s, v19.4s, v11.s[3] \n"
"fmla v22.4s, v19.4s, v13.s[3] \n"
"fmla v23.4s, v19.4s, v15.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v20.4s, v24.4s, v10.s[0] \n"
"fmla v21.4s, v24.4s, v12.s[0] \n"
"fmla v22.4s, v24.4s, v14.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v10.s[1] \n"
"fmla v21.4s, v25.4s, v12.s[1] \n"
"fmla v22.4s, v25.4s, v14.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v10.s[2] \n"
"fmla v21.4s, v26.4s, v12.s[2] \n"
"fmla v22.4s, v26.4s, v14.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v10.s[3] \n"
"fmla v21.4s, v27.4s, v12.s[3] \n"
"fmla v22.4s, v27.4s, v14.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r24 r25 r26 r27
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v28.4s}, [%3] \n"// r28
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28"
);
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"// r04 r05 r06 r07
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"// r08
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"// r10 r11 r12 r13
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r14 r15 r16 r17
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d8-d9}, [%2 :128] \n"// r18
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"// r24 r25 r26 r27
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3 :128] \n"// r28
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1] \n"// r04
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.4s}, [%2] \n"// r14
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3] \n"// r24
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128] \n"// r04
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r10 r11 r12 r13
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d8-d9}, [%2 :128] \n"// r14
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3 :128] \n"// r24
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"// sum0
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n"// r00 r01 r02
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n"// r10 r11 r12
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n"// r20 r21 r22
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v5.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"add %1, %1, #32 \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"add %2, %2, #32 \n"
"fadd v23.4s, v23.4s, v22.4s \n"
"add %3, %3, #32 \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n"// sum0
"pld [%1, #384] \n"
"vldm %1, {d0-d5} \n"// r00 r01 r02
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmul.f32 q13, q8, d0[0] \n"
"vmul.f32 q14, q9, d0[1] \n"
"vmul.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%2, #384] \n"
"vldm %2, {d0-d5} \n"// r10 r11 r12
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%3, #384] \n"
"vldm %3, {d0-d5} \n"// r20 r21 r22
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vadd.f32 q14, q14, q13 \n"
"add %1, %1, #32 \n"
"vadd.f32 q15, q15, q14 \n"
"add %2, %2, #32 \n"
"vadd.f32 q12, q12, q15 \n"
"add %3, %3, #32 \n"
"sub %4, %4, #512 \n"// kptr -= 8 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
|
SPOSet.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Ying Wai Li, yingwaili@ornl.gov, Oak Ridge National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_SINGLEPARTICLEORBITALSETBASE_H
#define QMCPLUSPLUS_SINGLEPARTICLEORBITALSETBASE_H
#include "OhmmsPETE/OhmmsArray.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "io/hdf_archive.h"
#if !defined(ENABLE_SOA)
#include "Message/CommOperators.h"
#endif
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
namespace qmcplusplus
{
/** base class for Single-particle orbital sets
 *
 * SPOSet stands for S(ingle)P(article)O(rbital)Set which contains
 * a number of single-particle orbitals with capabilities of evaluating \f$ \psi_j({\bf r}_i)\f$
 */
class SPOSet : public QMCTraits
{
public:
///aliases for the vector/matrix containers that hold SPO values, gradients, hessians, ...
typedef OrbitalSetTraits<ValueType>::IndexVector_t IndexVector_t;
typedef OrbitalSetTraits<ValueType>::ValueVector_t ValueVector_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradVector_t GradVector_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
typedef OrbitalSetTraits<ValueType>::HessMatrix_t HessMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef Array<HessType, OHMMS_DIM> HessArray_t;
///GGG = grad-hess, i.e. third derivatives of the orbitals
typedef OrbitalSetTraits<ValueType>::GradHessType GGGType;
typedef OrbitalSetTraits<ValueType>::GradHessVector_t GGGVector_t;
typedef OrbitalSetTraits<ValueType>::GradHessMatrix_t GGGMatrix_t;
typedef OrbitalSetTraits<ValueType>::VGLVector_t VGLVector_t;
typedef ParticleSet::Walker_t Walker_t;
///name-to-SPOSet lookup table (pointer ownership is managed by the pool's users — confirm at call sites)
typedef std::map<std::string, SPOSet*> SPOPool_t;
/** name of the object
 *
 * Several user classes can own SPOSet and use objectName as counter
 */
std::string objectName;
#if !defined(ENABLE_SOA)
///true if C is an identity matrix
bool Identity;
///if true, do not clean up
bool IsCloned;
///number of Single-particle orbitals
IndexType BasisSetSize;
/** pointer matrix containing the coefficients
 *
 * makeClone makes a shallow copy
 */
ValueMatrix_t* C;
///occupation number
Vector<RealType> Occ;
///Pass Communicator
Communicate* myComm;
#endif
/** constructor
 * @param ion_deriv true if the derived class provides ionic derivatives
 * @param optimizable true if the orbital set carries optimizable parameters
 */
SPOSet(bool ion_deriv = false, bool optimizable = false);
/** destructor
 *
 * Derived class destructor needs to pay extra attention to freeing memory shared among clones of SPOSet.
 */
virtual ~SPOSet()
{
#if !defined(ENABLE_SOA)
// C is shared among shallow clones (see makeClone); only the original owner frees it
if (!IsCloned && C != nullptr)
delete C;
#endif
}
// accessor function to Optimizable
inline bool isOptimizable() const { return Optimizable; }
/** return the size of the orbital set
 * Ye: this needs to be replaced by getOrbitalSetSize();
 */
inline int size() const { return OrbitalSetSize; }
/** print basic SPOSet information
 * @param pad string prepended to each output line for indentation
 */
void basic_report(const std::string& pad = "");
/** print SPOSet information
 */
virtual void report(const std::string& pad = "") { basic_report(pad); }
/** return the size of the orbitals
 */
inline int getOrbitalSetSize() const { return OrbitalSetSize; }
/** Query if this SPOSet has an explicit ion dependence. returns true if it does.
 */
inline bool hasIonDerivs() const { return ionDerivs; }
#if !defined(ENABLE_SOA)
int getBasisSetSize() const { return BasisSetSize; }
bool setIdentity(bool useIdentity);
void checkObject();
///get C and Occ
bool put(xmlNodePtr cur);
#else
/// return the size of the basis set if there is any
virtual int getBasisSetSize() const { return 0; }
/// check a few key parameters before putting the SPO into a determinant
virtual void checkObject() const {}
#endif
/// create optimizable orbital rotation parameters
// Single Slater creation
virtual void buildOptVariables(const size_t nel) {}
// For the MSD case rotations must be created in MultiSlaterFast class
virtual void buildOptVariables(const std::vector<std::pair<int, int>>& rotations) {}
// store parameters before getting destroyed by rotation.
virtual void storeParamsBeforeRotation() {}
// apply rotation to all the orbitals
virtual void applyRotation(const ValueMatrix_t& rot_mat, bool use_stored_copy = false)
{
// default implementation aborts: derived classes supporting rotation must override
std::ostringstream o;
o << "SPOSet::applyRotation is not implemented by " << className << std::endl;
APP_ABORT(o.str());
}
/// reset parameters to the values from optimizer
virtual void resetParameters(const opt_variables_type& optVariables) = 0;
/// check in/out parameters to the global list of parameters used by the optimizer
virtual void checkInVariables(opt_variables_type& active) {}
virtual void checkOutVariables(const opt_variables_type& active) {}
/// Parameter derivatives of the wavefunction and the Laplacian of the wavefunction; no-op by default
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi,
const int& FirstIndex,
const int& LastIndex) {}
/** Evaluate the derivative of the optimized orbitals with respect to the parameters
 * this is used only for MSD, to be refined for better serving both single and multi SD
 */
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi,
const ValueType& psiCurrent,
const std::vector<ValueType>& Coeff,
const std::vector<size_t>& C2node_up,
const std::vector<size_t>& C2node_dn,
const ValueVector_t& detValues_up,
const ValueVector_t& detValues_dn,
const GradMatrix_t& grads_up,
const GradMatrix_t& grads_dn,
const ValueMatrix_t& lapls_up,
const ValueMatrix_t& lapls_dn,
const ValueMatrix_t& M_up,
const ValueMatrix_t& M_dn,
const ValueMatrix_t& Minv_up,
const ValueMatrix_t& Minv_dn,
const GradMatrix_t& B_grad,
const ValueMatrix_t& B_lapl,
const std::vector<int>& detData_up,
const size_t N1,
const size_t N2,
const size_t NP1,
const size_t NP2,
const std::vector<std::vector<int>>& lookup_tbl)
{}
/** reset the target particleset
 * this is used to reset the pointer to ion-electron distance table needed by LCAO basis set.
 * Ye: Only AoS needs it, SoA LCAO doesn't need this. Reseting pointers is a state machine very hard to maintain.
 * This interface should be removed with AOS.
 */
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** set the OrbitalSetSize
 * @param norbs number of single-particle orbitals
 * Ye: I prefer to remove this interface in the future. SPOSet builders need to handle the size correctly.
 * It doesn't make sense allowing to set the value at any place in the code.
 */
virtual void setOrbitalSetSize(int norbs) = 0;
/** Evaluate the SPO value at an explicit position.
 * Ye: This is used only for debugging the CUDA code and should be removed.
 */
virtual void evaluate(const ParticleSet& P, PosType& r, ValueVector_t& psi);
/** evaluate the values of this single-particle orbital set
 * @param P current ParticleSet
 * @param iat active particle
 * @param psi values of the SPO
 */
virtual void evaluate(const ParticleSet& P, int iat, ValueVector_t& psi) = 0;
/** evaluate the values of this single-particle orbital sets of multiple walkers
 * @param spo_list the list of SPOSet pointers in a walker batch
 * @param P_list the list of ParticleSet pointers in a walker batch
 * @param iat active particle
 * @param psi_v_list the list of value vector pointers in a walker batch
 */
virtual void mw_evaluateValue(const std::vector<SPOSet*>& spo_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<ValueVector_t*>& psi_v_list)
{
// walkers are independent, so the default batched implementation dispatches them in parallel
#pragma omp parallel for
for (int iw = 0; iw < spo_list.size(); iw++)
spo_list[iw]->evaluate(*P_list[iw], iat, *psi_v_list[iw]);
}
/** evaluate determinant ratios for virtual moves, e.g., sphere move for nonlocalPP
 * @param VP virtual particle set
 * @param psi values of the SPO, used as a scratch space if needed
 * @param psiinv the row of inverse slater matrix corresponding to the particle moved virtually
 * @param ratios return determinant ratios
 */
virtual void evaluateDetRatios(const VirtualParticleSet& VP,
ValueVector_t& psi,
const ValueVector_t& psiinv,
std::vector<ValueType>& ratios);
/** evaluate the values, gradients and laplacians of this single-particle orbital set
 * @param P current ParticleSet
 * @param iat active particle
 * @param psi values of the SPO
 * @param dpsi gradients of the SPO
 * @param d2psi laplacians of the SPO
 */
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
ValueVector_t& d2psi) = 0;
/** evaluate the values, gradients and laplacians of this single-particle orbital sets of multiple walkers
 * @param spo_list the list of SPOSet pointers in a walker batch
 * @param P_list the list of ParticleSet pointers in a walker batch
 * @param iat active particle
 * @param psi_v_list the list of value vector pointers in a walker batch
 * @param dpsi_v_list the list of gradient vector pointers in a walker batch
 * @param d2psi_v_list the list of laplacian vector pointers in a walker batch
 */
virtual void mw_evaluateVGL(const std::vector<SPOSet*>& spo_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<ValueVector_t*>& psi_v_list,
const std::vector<GradVector_t*>& dpsi_v_list,
const std::vector<ValueVector_t*>& d2psi_v_list)
{
// default batched implementation: one independent single-walker evaluation per thread
#pragma omp parallel for
for (int iw = 0; iw < spo_list.size(); iw++)
spo_list[iw]->evaluate(*P_list[iw], iat, *psi_v_list[iw], *dpsi_v_list[iw], *d2psi_v_list[iw]);
}
/** evaluate the values, gradients and hessians of this single-particle orbital set
 * @param P current ParticleSet
 * @param iat active particle
 * @param psi values of the SPO
 * @param dpsi gradients of the SPO
 * @param grad_grad_psi hessians of the SPO
 */
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
HessVector_t& grad_grad_psi);
/** evaluate the values, gradients, hessians, and grad hessians of this single-particle orbital set
 * @param P current ParticleSet
 * @param iat active particle
 * @param psi values of the SPO
 * @param dpsi gradients of the SPO
 * @param grad_grad_psi hessians of the SPO
 * @param grad_grad_grad_psi grad hessians of the SPO
 */
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
HessVector_t& grad_grad_psi,
GGGVector_t& grad_grad_grad_psi);
/** evaluate the third derivatives of this single-particle orbital set
 * @param P current ParticleSet
 * @param first first particle
 * @param last last particle
 * @param grad_grad_grad_logdet third derivatives of the SPO
 */
virtual void evaluateThirdDeriv(const ParticleSet& P, int first, int last, GGGMatrix_t& grad_grad_grad_logdet);
/** evaluate the values, gradients and laplacians of this single-particle orbital for [first,last) particles
 * @param P current ParticleSet
 * @param first starting index of the particles
 * @param last ending index of the particles
 * @param logdet determinant matrix to be inverted
 * @param dlogdet gradients
 * @param d2logdet laplacians
 *
 */
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
ValueMatrix_t& d2logdet) = 0;
/** evaluate the values, gradients and hessians of this single-particle orbital for [first,last) particles
 * @param P current ParticleSet
 * @param first starting index of the particles
 * @param last ending index of the particles
 * @param logdet determinant matrix to be inverted
 * @param dlogdet gradients
 * @param grad_grad_logdet hessians
 *
 */
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
HessMatrix_t& grad_grad_logdet);
/** evaluate the values, gradients, hessians and third derivatives of this single-particle orbital for [first,last) particles
 * @param P current ParticleSet
 * @param first starting index of the particles
 * @param last ending index of the particles
 * @param logdet determinant matrix to be inverted
 * @param dlogdet gradients
 * @param grad_grad_logdet hessians
 * @param grad_grad_grad_logdet third derivatives
 *
 */
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
HessMatrix_t& grad_grad_logdet,
GGGMatrix_t& grad_grad_grad_logdet);
/** evaluate the gradients of this single-particle orbital
 * for [first,last) target particles with respect to the given source particle
 * @param P current ParticleSet
 * @param first starting index of the particles
 * @param last ending index of the particles
 * @param iat_src source particle index
 * @param gradphi gradients
 *
 */
virtual void evaluateGradSource(const ParticleSet& P,
int first,
int last,
const ParticleSet& source,
int iat_src,
GradMatrix_t& gradphi);
/** evaluate the gradients of values, gradients, laplacians of this single-particle orbital
 * for [first,last) target particles with respect to the given source particle
 * @param P current ParticleSet
 * @param first starting index of the particles
 * @param last ending index of the particles
 * @param iat_src source particle index
 * @param gradphi gradients of values
 * @param grad_grad_phi gradients of gradients
 * @param grad_lapl_phi gradients of laplacians
 *
 */
virtual void evaluateGradSource(const ParticleSet& P,
int first,
int last,
const ParticleSet& source,
int iat_src,
GradMatrix_t& grad_phi,
HessMatrix_t& grad_grad_phi,
GradMatrix_t& grad_lapl_phi);
/** access the k point related to the given orbital */
// base implementation returns a default-constructed (zero) PosType
virtual PosType get_k(int orb) { return PosType(); }
/** make a clone of itself
 * every derived class must implement this to have threading working correctly.
 */
virtual SPOSet* makeClone() const;
/** Used only by cusp correction in AOS LCAO.
 * Ye: the SoA LCAO moves all this responsibility to the builder.
 * This interface should be removed with AoS.
 */
virtual bool transformSPOSet() { return true; }
/** finalize the construction of SPOSet
 *
 * for example, classes serving accelerators may need to transfer data from host to device
 * after the host side objects are built.
 */
virtual void finalizeConstruction() {}
#ifdef QMC_CUDA
using CTS = CUDAGlobalTypes;
//////////////////////////////////////////
// Walker-parallel vectorized functions //
//////////////////////////////////////////
virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool) {}
virtual void evaluate(std::vector<Walker_t*>& walkers, int iat, gpu::device_vector<CTS::ValueType*>& phi);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi,
gpu::device_vector<CTS::ValueType*>& grad_lapl_list,
int row_stride);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi,
gpu::device_vector<CTS::ValueType*>& grad_lapl_list,
int row_stride,
int k,
bool klinear);
virtual void evaluate(std::vector<PosType>& pos, gpu::device_vector<CTS::RealType*>& phi);
virtual void evaluate(std::vector<PosType>& pos, gpu::device_vector<CTS::ComplexType*>& phi);
#endif
#if !defined(ENABLE_SOA)
protected:
///helpers for put(): read occupation numbers / coefficients from XML or HDF5
bool putOccupation(xmlNodePtr occ_ptr);
bool putFromXML(xmlNodePtr coeff_ptr);
bool putFromH5(const std::string& fname, xmlNodePtr coeff_ptr);
#endif
protected:
///true, if the derived class has non-zero ionic derivatives.
const bool ionDerivs;
///true if SPO is optimizable
const bool Optimizable;
///number of Single-particle orbitals
IndexType OrbitalSetSize;
/// Optimizable variables
opt_variables_type myVars;
///name of the class
std::string className;
};
typedef SPOSet* SPOSetPtr;
} // namespace qmcplusplus
#endif
|
pi_task.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
This version of the program uses a divide and conquer algorithm
with tasks and taskwait.
History: Written by Tim Mattson, 10/2013
*/
#include <omp.h>
#include <stdio.h>
/* Problem size: 2^30 rectangles in the midpoint quadrature. */
static long num_steps = 1024*1024*1024;
#define MIN_BLK 1024*256
#define MAX 4
/*
 * Recursively integrate 4/(1+x*x) over steps [Nstart, Nfinish) with the
 * midpoint rule.  Ranges wider than MIN_BLK are halved and each half is
 * evaluated in its own OpenMP task; the partial sums are combined after
 * a taskwait.  Returns the unscaled sum (the caller multiplies by step).
 */
double pi_comp(int Nstart,int Nfinish,double step)
{
    double total = 0.0;

    if (Nfinish - Nstart < MIN_BLK) {
        /* Base case: plain serial midpoint sum. */
        int i;
        for (i = Nstart; i < Nfinish; i++) {
            double xmid = (i + 0.5) * step;
            total += 4.0 / (1.0 + xmid * xmid);
        }
    }
    else {
        /* Divide and conquer: one child task per half, join at taskwait. */
        int mid = Nfinish - (Nfinish - Nstart) / 2;
        double left_sum, right_sum;
#pragma omp task shared(left_sum)
        left_sum = pi_comp(Nstart, mid, step);
#pragma omp task shared(right_sum)
        right_sum = pi_comp(mid, Nfinish, step);
#pragma omp taskwait
        total = left_sum + right_sum;
    }
    return total;
}
/*
 * Driver: for thread counts 1..MAX, time the task-based evaluation of
 * pi = step * sum_i 4/(1 + x_i^2) over num_steps midpoints and report
 * the result together with the elapsed wall-clock time.
 *
 * Fixes: removed the unused loop variable `i`; added an explicit
 * `return 0` so the exit status is well defined on pre-C99 compilers.
 */
int main ()
{
    int j;                                  /* thread-count sweep index */
    double step, pi, sum;
    double init_time, final_time;
    step = 1.0 / (double) num_steps;

    for (j = 1; j <= MAX; j++){
        omp_set_num_threads(j);
        init_time = omp_get_wtime();
#pragma omp parallel
        {
            /* One thread seeds the task tree; the whole team executes the tasks. */
#pragma omp single
            {
                printf("num threads=%d", omp_get_num_threads());
                sum = pi_comp(0, num_steps, step);
            }
        }
        pi = step * sum;
        final_time = omp_get_wtime() - init_time;
        printf(" for %ld steps pi = %f in %f secs\n", num_steps, pi, final_time);
    }
    return 0;
}
|
boundaries.c | #include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
extern void CXX_Walk_Int(char *l, const char *h, const size_t sz, unsigned int *b);
extern void CXX_Walk_Double(char *l, const char *h, const size_t sz, double *b);
#ifdef __cplusplus
}
#endif
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include "allocator.h"
#include "geometry.h"
//#include "fio.h"
#include "mesh.h"
#include "boundaries.h"
/*
 * Decode the face-connectivity section of the file buffer into f->fptr.
 * The buffer stores four planes of f->sz node ids (f0..f3, plane-major,
 * 1-based); ids are converted to 0-based indices on the way in.
 * Returns the number of buffer bytes consumed.
 */
static size_t
fbmalloc(char * fbuf, struct bface * f)
{
    size_t count = f->sz * 4;          /* four node ids per facet */
    size_t bytes = count * sizeof(uint32_t);
    uint32_t *raw = (uint32_t *) fun3d_malloc(count, sizeof(uint32_t));

    /* Walk the raw file bytes into host-order integers. */
    CXX_Walk_Int(fbuf, fbuf + bytes, count, raw);

    uint32_t i;
#pragma omp parallel for
    for (i = 0; i < f->sz; i++)
    {
        /* 1-based file ids -> 0-based indices, one plane per field. */
        f->fptr[i].f0 = raw[i] - 1;
        f->fptr[i].f1 = raw[i + f->sz] - 1;
        f->fptr[i].f2 = raw[i + 2 * f->sz] - 1;
        f->fptr[i].f3 = raw[i + 3 * f->sz] - 1;
    }
    fun3d_free(raw);
    return bytes;
}
/*
 * Decode the boundary-node section of the file buffer: n->sz node ids
 * (1-based, converted to 0-based in place) followed by three planes of
 * n->sz doubles holding the x/y/z coordinates.
 * Returns the number of buffer bytes consumed.
 */
static size_t
nbmalloc(char * fbuf, struct bnode * n)
{
    size_t ibytes = n->sz * sizeof(uint32_t);

    /* Node ids come first; decode them straight into n->nptr. */
    CXX_Walk_Int(fbuf, fbuf + ibytes, n->sz, n->nptr);

    size_t ncoord = n->sz * 3;
    size_t dbytes = ncoord * sizeof(double);
    double *coord = (double *) fun3d_malloc(ncoord, sizeof(double));

    /* The coordinate planes follow immediately after the ids. */
    CXX_Walk_Double(fbuf + ibytes, fbuf + ibytes + dbytes, ncoord, coord);

    uint32_t i;
#pragma omp parallel for
    for (i = 0; i < n->sz; i++)
    {
        n->nptr[i]--;                     /* 1-based -> 0-based */
        n->xyz->x0[i] = coord[i];
        n->xyz->x1[i] = coord[i + n->sz];
        n->xyz->x2[i] = coord[i + 2 * n->sz];
    }
    fun3d_free(coord);
    return (ibytes + dbytes);
}
/*
 * Allocate and populate one boundary (faces + nodes) from the file buffer.
 *   p1 - number of boundary faces
 *   p2 - number of boundary nodes
 *   p3 - count of leading bookkeeping ints (skipped, two per entry)
 * The parsed tables are attached to b->f and b->n.
 * Returns the number of buffer bytes consumed, including the skipped span.
 */
static size_t
bmallocl(
    const size_t p1,
    const size_t p2,
    const size_t p3,
    char * fbuf,
    struct boundary * b)
{
    /* Skip the leading boundary bookkeeping data, which is negligible
     * for our core calculations. */
    size_t consumed = p3 * 2 * sizeof(uint32_t);

    /* Faces: allocate the table, then decode connectivity from the file. */
    struct bface *faces = (struct bface *) fun3d_malloc(1, sizeof(struct bface));
    faces->sz = p1;
    faces->fptr = (struct facet *) fun3d_malloc(faces->sz, sizeof(struct facet));
    consumed += fbmalloc((fbuf + consumed), faces);
    b->f = faces;

    /* Nodes: ids plus one array per coordinate axis. */
    struct bnode *nodes = (struct bnode *) fun3d_malloc(1, sizeof(struct bnode));
    nodes->sz = p2;
    nodes->nptr = (uint32_t *) fun3d_malloc(nodes->sz, sizeof(uint32_t));
    nodes->xyz = (struct xyz *) fun3d_malloc(1, sizeof(struct xyz));
    nodes->xyz->x0 = (double *) fun3d_malloc(nodes->sz, sizeof(double));
    nodes->xyz->x1 = (double *) fun3d_malloc(nodes->sz, sizeof(double));
    nodes->xyz->x2 = (double *) fun3d_malloc(nodes->sz, sizeof(double));
    consumed += nbmalloc((fbuf + consumed), nodes);
    b->n = nodes;

    return consumed;
}
/*
 * Parse both boundaries out of the file buffer and repack them into the
 * caller's table `b` (b->s: first-boundary nodes, b->f: second-boundary
 * nodes, b->fc: first-boundary faces as three node-index arrays).
 * NOTE(review): the p[] indices (6,9,3 and 8,11,5) presumably select the
 * face/node/bookkeeping counts of the two boundary kinds from the mesh
 * header -- confirm against mesh.h / the caller.
 * Returns the number of buffer bytes consumed.
 */
size_t
bmalloc(const uint32_t *p, char *fbuf, struct btbl *b)
{
    /* Temporary shells; their arrays are re-parented below, then the
     * shells themselves are freed. */
    struct boundary *s = (struct boundary *) fun3d_malloc(1, sizeof(struct boundary));
    struct boundary *f = (struct boundary *) fun3d_malloc(1, sizeof(struct boundary));
    size_t bytes = 0;
    bytes += bmallocl(p[6], p[9], p[3], fbuf, s);
    bytes += bmallocl(p[8], p[11], p[5], (fbuf + bytes), f);
    /* Gather, for each face of boundary `s`, the node ids of its first
     * three facet corners into three parallel arrays. */
    uint32_t *snfn0 = (uint32_t *) fun3d_malloc(s->f->sz, sizeof(uint32_t));
    uint32_t *snfn1 = (uint32_t *) fun3d_malloc(s->f->sz, sizeof(uint32_t));
    uint32_t *snfn2 = (uint32_t *) fun3d_malloc(s->f->sz, sizeof(uint32_t));
    uint32_t i;
    for(i = 0; i < s->f->sz; i++)
    {
        uint32_t n0 = s->n->nptr[s->f->fptr[i].f0];
        uint32_t n1 = s->n->nptr[s->f->fptr[i].f1];
        uint32_t n2 = s->n->nptr[s->f->fptr[i].f2];
        snfn0[i] = n0;
        snfn1[i] = n1;
        snfn2[i] = n2;
    }
    struct face *fptr = (struct face *) fun3d_malloc(1, sizeof(struct face));
    fptr->n0 = snfn0;
    fptr->n1 = snfn1;
    fptr->n2 = snfn2;
    /* Output tables: ownership of nptr/xyz moves from the shells into
     * sn/fn, so those arrays must NOT be freed here. */
    struct bntbl *sn = (struct bntbl *) fun3d_malloc(1, sizeof(struct bntbl));
    struct bntbl *fn = (struct bntbl *) fun3d_malloc(1, sizeof(struct bntbl));
    struct bftbl *fc = (struct bftbl *) fun3d_malloc(1, sizeof(struct bftbl));
    sn->sz = s->n->sz;
    sn->nptr = s->n->nptr;
    sn->xyz = s->n->xyz;
    fn->sz = f->n->sz;
    fn->nptr = f->n->nptr;
    fn->xyz = f->n->xyz;
    fc->sz = s->f->sz;
    fc->fptr = fptr;
    b->s = sn;
    b->f = fn;
    b->fc = fc;
    /* Free only what was not re-parented: the facet arrays and shells. */
    fun3d_free(s->f->fptr);
    fun3d_free(s->f);
    fun3d_free(s->n);
    fun3d_free(s);
    fun3d_free(f->f->fptr);
    fun3d_free(f->f);
    fun3d_free(f->n);
    fun3d_free(f);
    return bytes;
}
mkl_convolution-inl.h | /*******************************************************************************
* Copyright 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file mkl_convolution-inl.h
* \brief
* \author lingyan.guo@intel.com
* zhenlin.luo@intel.com
*
*******************************************************************************/
#ifndef MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#define MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#include <mxnet/storage.h>
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "../operator_common.h"
#include "../nn/convolution-inl.h"
#include "./mkl_util-inl.h"
namespace mxnet {
namespace op {
/// MKL2017 DNN-primitive implementation of 2-D (optionally grouped)
/// convolution.  One dnnPrimitive_t is kept per pass (forward,
/// backward-data, backward-filter, backward-bias); MKLData<> wrappers
/// hold the layout descriptors and convert between MXNet's plain buffers
/// and MKL's private formats.
/// NOTE(review): the geometry code reads shape_[3] as width and shape_[2]
/// as height, i.e. it assumes NCHW layout -- confirm against callers.
template<typename xpu, typename DType>
class MKLConvolutionOp : public Operator {
 public:
  static std::string getName() {
    return "MKLConvolutionOp";
  }
  // Null out the four primitives and allocate the MKLData descriptors.
  // Actual MKL layouts are created lazily in LayerSetUp().
  void SetupBuffer() {
    convolutionBwdBias = static_cast<dnnPrimitive_t>(NULL);
    convolutionBwdFilter = static_cast<dnnPrimitive_t>(NULL);
    convolutionBwdData = static_cast<dnnPrimitive_t>(NULL);
    convolutionFwd = static_cast<dnnPrimitive_t>(NULL);
    fwd_bottom_data = MKLData<DType>::create();
    fwd_top_data = MKLData<DType>::create();
    fwd_filter_data = MKLData<DType>::create();
    fwd_bias_data = MKLData<DType>::create();
    bwdd_top_diff = MKLData<DType>::create();
    bwdd_bottom_diff = MKLData<DType>::create();
    bwdd_filter_data = MKLData<DType>::create();
    bwdf_top_diff = MKLData<DType>::create();
    bwdf_filter_diff = MKLData<DType>::create();
    bwdf_bottom_data = MKLData<DType>::create();
    bwdb_top_diff = MKLData<DType>::create();
    bwdb_bias_diff = MKLData<DType>::create();
    // Names are for debugging purposes only.
    fwd_bottom_data->name = "fwd_bottom_data @ " + this->getName();
    fwd_top_data->name = "fwd_top_data @ " + this->getName();
    fwd_filter_data->name = "fwd_filter_data @ " + this->getName();
    fwd_bias_data->name = "fwd_bias_data @ " + this->getName();
    bwdd_top_diff->name = "bwdd_top_diff @ " + this->getName();
    bwdd_bottom_diff->name = "bwdd_bottom_diff @ " + this->getName();
    bwdd_filter_data->name = "bwdd_filter_data @ " + this->getName();
    bwdf_top_diff->name = "bwdf_top_diff @ " + this->getName();
    bwdf_bottom_data->name = "bwdf_bottom_data @ " + this->getName();
    bwdf_filter_diff->name = "bwdf_filter_diff @ " + this->getName();
    bwdb_top_diff->name = "bwdb_top_diff @ " + this->getName();
    bwdb_bias_diff->name = "bwdb_bias_diff @ " + this->getName();
  }
  // Takes the parameter struct by value; primitive creation is deferred
  // until the first Forward/Backward call (init_mkldnn_ guard).
  explicit MKLConvolutionOp(ConvolutionParam p):
      convolutionFwd(NULL),
      convolutionBwdData(static_cast<dnnPrimitive_t>(NULL)),
      convolutionBwdFilter(static_cast<dnnPrimitive_t>(NULL)),
      convolutionBwdBias(static_cast<dnnPrimitive_t>(NULL)) {
    this->param_ = p;
    init_mkldnn_ = false;
    // convert MBytes first to Bytes and then to elements.
    param_.workspace = (param_.workspace << 20) / sizeof(DType);
    SetupBuffer();
  }
  // Delete whichever primitives were actually created and re-null them,
  // so a double release is harmless.
  void ReleaseBuffer() {
    if (convolutionFwd != NULL) {
      dnnDelete<DType>(convolutionFwd);
      convolutionFwd = NULL;
    }
    if (convolutionBwdData != NULL) {
      dnnDelete<DType>(convolutionBwdData);
      convolutionBwdData = NULL;
    }
    if (convolutionBwdFilter != NULL) {
      dnnDelete<DType>(convolutionBwdFilter);
      convolutionBwdFilter = NULL;
    }
    // The bias primitive only exists when the layer has a bias term.
    if (!param_.no_bias && convolutionBwdBias != NULL) {
      dnnDelete<DType>(convolutionBwdBias);
      convolutionBwdBias = NULL;
    }
  }
  virtual ~MKLConvolutionOp() {
    ReleaseBuffer();
  }

 private:
  // Derive all geometry from the first (data, output) pair seen, then
  // create the four MKL convolution primitives and the layout
  // descriptors for every resource they touch.  Called once, lazily,
  // from Forward()/Backward().
  void LayerSetUp(const mshadow::Tensor<xpu, 4, DType> &data,
                  const mshadow::Tensor<xpu, 4, DType> &out) {
    this->width_ = data.shape_[3];
    this->height_ = data.shape_[2];
    this->channels_ = data.shape_[1];
    this->num_ = data.shape_[0];
    this->group_ = param_.num_group;
    this->width_out_ = out.shape_[3];
    this->height_out_ = out.shape_[2];
    int channel_out_ = out.shape_[1];
    this->num_output_ = channel_out_;
    kernel_w_ = param_.kernel[1];
    kernel_h_ = param_.kernel[0];
    stride_w_ = param_.stride[1];
    stride_h_ = param_.stride[0];
    pad_w_ = param_.pad[1];
    pad_h_ = param_.pad[0];
    int status;
    size_t n, g;
    size_t iw, ih, ic;
    size_t ow, oh, oc;
    size_t kw, kh;
    size_t dimension = 4;
    g = std::max(this->group_, 1);
    n = this->num_;
    iw = this->width_;
    ih = this->height_;
    ic = this->channels_;
    ow = this->width_out_;
    oh = this->height_out_;
    oc = this->num_output_;
    kw = this->kernel_w_;
    kh = this->kernel_h_;
    oc = this->num_output_;  // (duplicate of the assignment above; harmless)
    // MKL dnn sizes/strides are listed innermost-dimension first:
    // {w, h, c, n} with strides {1, w, w*h, w*h*c}.
    size_t bdata_sizes[4] = { iw, ih, ic, n };
    size_t bdata_strides[4] = { 1, iw, iw*ih, iw*ih*ic };
    /* starting with MKL 2017 Gold in case of groups filter layout
     * becomes 5D, i.e. groups become a separate dimension */
    size_t g_mkl2017 = g;
    size_t f_dimension = dimension + (g != 1);
    if (getMKLBuildDate() < 20160701) {
      g_mkl2017 = 1;
      f_dimension = dimension;
    }
    size_t fdata_sizes[5] = { kw, kh, ic / g, oc / g_mkl2017, g_mkl2017 };
    size_t fdata_strides[5] = { 1, kw, kw*kh, kw*kh*ic / g, kw*kh*ic / g*oc / g };
    size_t bias_sizes[1] = { oc };
    size_t bias_strides[1] = { 1 };
    size_t tdata_sizes[4] = { ow, oh, oc, n };
    size_t tdata_strides[4] = { 1, ow, ow*oh, ow*oh*oc };
    size_t convolutionStrides[2] = { this->stride_w_, this->stride_h_ };
    // MKL expects padding as a (negative) input offset.
    int inputOffset[2] = { -this->pad_w_, -this->pad_h_ };
    // Names are for debugging purposes only.
    /*** convolution section ***/
    if (!param_.no_bias) {
      status = dnnGroupsConvolutionCreateForwardBias<DType>(&convolutionFwd,
                                                            NULL,
                                                            dnnAlgorithmConvolutionDirect,
                                                            g,
                                                            dimension,
                                                            bdata_sizes,
                                                            tdata_sizes,
                                                            fdata_sizes,
                                                            convolutionStrides,
                                                            inputOffset,
                                                            dnnBorderZeros);
    } else {
      status = dnnGroupsConvolutionCreateForward<DType>(&convolutionFwd,
                                                        NULL,
                                                        dnnAlgorithmConvolutionDirect,
                                                        g,
                                                        dimension,
                                                        bdata_sizes,
                                                        tdata_sizes,
                                                        fdata_sizes,
                                                        convolutionStrides,
                                                        inputOffset,
                                                        dnnBorderZeros);
    }
    CHECK_EQ(status, 0)
      << "Failed dnnCreateConvolution<DType>(dnnForward) with status "
      << status << "\n";
    // Layouts for every resource the forward primitive consumes/produces.
    fwd_bottom_data->create_layouts(convolutionFwd, dnnResourceSrc, dimension,
                                    bdata_sizes, bdata_strides);
    fwd_top_data->create_layouts(convolutionFwd, dnnResourceDst, dimension,
                                 tdata_sizes, tdata_strides);
    fwd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter,
                                    f_dimension, fdata_sizes, fdata_strides);
    if (!param_.no_bias)
      fwd_bias_data->create_layouts(convolutionFwd, dnnResourceBias, 1,
                                    bias_sizes, bias_strides);
    /*
     * Backward by data layer setup
     */
    status = dnnGroupsConvolutionCreateBackwardData<DType>(&convolutionBwdData,
                                                           NULL,
                                                           dnnAlgorithmConvolutionDirect,
                                                           g,
                                                           dimension,
                                                           bdata_sizes,
                                                           tdata_sizes,
                                                           fdata_sizes,
                                                           convolutionStrides,
                                                           inputOffset,
                                                           dnnBorderZeros);
    CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardData with status "
      << status << "\n";
    bwdd_bottom_diff->create_layouts(convolutionBwdData, dnnResourceDiffSrc,
                                     dimension, bdata_sizes, bdata_strides);
    bwdd_top_diff->create_layouts(convolutionBwdData, dnnResourceDiffDst,
                                  dimension, tdata_sizes, tdata_strides);
    bwdd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter,
                                     f_dimension, fdata_sizes, fdata_strides);
    /*
     * Backward by filter layer setup
     */
    status = dnnGroupsConvolutionCreateBackwardFilter<DType>(&convolutionBwdFilter,
                                                             NULL,
                                                             dnnAlgorithmConvolutionDirect,
                                                             g,
                                                             dimension,
                                                             bdata_sizes,
                                                             tdata_sizes,
                                                             fdata_sizes,
                                                             convolutionStrides,
                                                             inputOffset,
                                                             dnnBorderZeros);
    CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardFilter with status "
      << status << "\n";
    bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceSrc,
                                     dimension, bdata_sizes, bdata_strides);
    bwdf_top_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffDst,
                                  dimension, tdata_sizes, tdata_strides);
    bwdf_filter_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffFilter,
                                     f_dimension, fdata_sizes, fdata_strides);
    /*
     * Backward by bias layer setup
     */
    if (!param_.no_bias) {
      status = dnnGroupsConvolutionCreateBackwardBias<DType>(&convolutionBwdBias,
                                                             NULL,
                                                             dnnAlgorithmConvolutionDirect,
                                                             g,
                                                             dimension,
                                                             tdata_sizes);
      CHECK_EQ(status, 0)
        << "Failed dnnConvolutionCreateBackwardBias with status "
        << status << "\n";
      bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst,
                                    dimension, tdata_sizes, tdata_strides);
      bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, 1,
                                     bias_sizes, bias_strides);
    }
  }

 public:
  // Forward pass: convert data/weights (and bias) to MKL-private layouts
  // as needed, execute the forward primitive, and (in non-experimental
  // builds) convert the output back to the user layout.
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_args) {
    using namespace mshadow;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    DType *data_ptr = NULL;
    DType *wmat_ptr = NULL;
    DType *out_ptr = NULL;
    Tensor<xpu, 4, DType> data =
      mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
    Tensor<xpu, 4, DType> out =
      mkl_experimental_direct_get<xpu, 4, DType>(out_data[conv::kOut], s);
    Tensor<xpu, 4, DType> wmat =
      mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kWeight], s);
    // Lazy one-time primitive/layout creation.
    if (!init_mkldnn_) {
      LayerSetUp(data, out);
      init_mkldnn_ = true;
    }
    CHECK_EQ(data.CheckContiguous(), true);
    CHECK_EQ(wmat.CheckContiguous(), true);
    CHECK_EQ(out.CheckContiguous(), true);
    data_ptr = data.dptr_;
    wmat_ptr = wmat.dptr_;
    out_ptr = out.dptr_;
    int status;
    void *res_convolutionFwd[dnnResourceNumber];
    res_convolutionFwd[dnnResourceSrc] =
      fwd_bottom_data->get_converted_prv(data_ptr, false, in_data[conv::kData]);
    res_convolutionFwd[dnnResourceFilter] =
      fwd_filter_data->get_converted_prv(wmat_ptr, true, in_data[conv::kWeight]);
    if (!param_.no_bias) {
      Tensor<xpu, 1, DType> bias =
        mkl_experimental_direct_get<xpu, 1, DType>(in_data[conv::kBias], s);
      res_convolutionFwd[dnnResourceBias] =
        fwd_bias_data->get_converted_prv(bias.dptr_, true, in_data[conv::kBias]);
    }
    res_convolutionFwd[dnnResourceDst] = fwd_top_data->get_output_ptr(out_ptr,
      fwd_top_data, out_data[conv::kOut]);
    status = dnnExecute<DType>(convolutionFwd, res_convolutionFwd);
    CHECK_EQ(status, 0) << "Forward convolution failed with status " << status;
#if MKL_EXPERIMENTAL == 0
    // Without the experimental private-buffer path, copy the result back
    // into the caller-visible layout.
    if (fwd_top_data->conversion_needed()) {
      fwd_top_data->convert_from_prv(out_ptr);
    }
#endif
  }
  // Snapshot blob_size DType elements from src into freshly allocated
  // CPU storage.  Used to emulate kAddTo: MKL overwrites the gradient
  // buffer, so the previous contents are saved first.
  void AddToModeAllocAndStoreBuffer(void *src, int blob_size, Storage::Handle *pws) {
    int blob_byte_size = blob_size * sizeof(DType);
    *pws = Storage::Get()->Alloc(blob_byte_size, Context::CPU());
    memcpy(pws->dptr, src, blob_byte_size);
  }
  // Add the saved snapshot back onto dst element-wise, then free it.
  // Completes the kAddTo emulation started by the function above.
  void AddToModeAddAndReleaseBuffer(Storage::Handle *pws, void *dst_, int blob_size) {
    DType *dst = reinterpret_cast<DType*>(dst_);
    DType *src = reinterpret_cast<DType*>(pws->dptr);
#pragma omp parallel for
    for (int i = 0; i < blob_size; i++) {
      dst[i] += src[i];
    }
    if (pws->dptr)
      Storage::Get()->Free(*pws);
    pws->dptr = NULL;
  }
  // Backward pass: req[0] gates the data gradient, req[1] the weight
  // gradient; the bias gradient is computed whenever the layer has a
  // bias.  kAddTo is emulated via the snapshot helpers above because the
  // MKL primitives only support overwrite semantics.
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_args) {
    using namespace mshadow;
    if (param_.kernel.ndim() > 2) {
      LOG(FATAL) << "Volume convolution is not implmented in mshadow";
    }
    CHECK_EQ(out_grad.size(), 1);
    size_t expected = param_.no_bias == 0 ? 3 : 2;
    CHECK(in_data.size() == expected && in_grad.size() == expected);
    CHECK_EQ(req.size(), expected);
    CHECK_EQ(in_data[conv::kWeight].CheckContiguous(), true);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 4, DType> data =
      mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
    // View weights as (group, filters/group, c/group * kh * kw).
    Shape<3> wmat_shape =
      Shape3(param_.num_group,
             param_.num_filter / param_.num_group,
             data.shape_[1] / param_.num_group * param_.kernel[0] * param_.kernel[1]);
    Tensor<xpu, 3, DType> wmat =
      mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
      in_data[conv::kWeight], wmat_shape, s);
    Tensor<xpu, 4, DType> grad =
      mkl_experimental_direct_get<xpu, 4, DType>(out_grad[conv::kOut], s);
    Tensor<xpu, 4, DType> gdata =
      mkl_experimental_direct_get<xpu, 4, DType>(in_grad[conv::kData], s);
    Tensor<xpu, 3, DType> gwmat =
      mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
      in_grad[conv::kWeight], wmat_shape, s);
    // Lazy setup also happens here in case Backward runs first.
    if (!init_mkldnn_) {
      init_mkldnn_ = true;
      LayerSetUp(data, grad);
    }
    int status;
    if (req[0]) {
      // Gradient w.r.t. the input data.
      void *res_convolutionBwdData[dnnResourceNumber];
      res_convolutionBwdData[dnnResourceDiffDst] =
        bwdd_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
      res_convolutionBwdData[dnnResourceFilter] =
        bwdd_filter_data->get_converted_prv(wmat.dptr_, false, in_data[conv::kWeight]);
      Storage::Handle addtoWorkspace;
      if (req[0] == kAddTo) {
        // wait mkl support addto mode
        AddToModeAllocAndStoreBuffer(gdata.dptr_, in_grad[conv::kData].Size(), &addtoWorkspace);
      }
      res_convolutionBwdData[dnnResourceDiffSrc] = bwdd_bottom_diff->get_output_ptr(gdata.dptr_,
        bwdd_bottom_diff, in_grad[conv::kData]);
      status = dnnExecute<DType>(convolutionBwdData, res_convolutionBwdData);
      CHECK_EQ(status, 0) << "Backward Data conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
      if (bwdd_bottom_diff->conversion_needed()) {
        bwdd_bottom_diff->convert_from_prv(gdata.dptr_);
      }
#endif
      if (req[0] == kAddTo) {
        if (bwdd_bottom_diff->conversion_needed()) {
          bwdd_bottom_diff->convert_from_prv(gdata.dptr_);
        }
        AddToModeAddAndReleaseBuffer(&addtoWorkspace, gdata.dptr_, in_grad[conv::kData].Size());
      }
    }
    if (req[1]) {
      // Gradient w.r.t. the filter weights.
      void *res_convolutionBwdFilter[dnnResourceNumber];
      res_convolutionBwdFilter[dnnResourceDiffDst] =
        bwdf_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
      res_convolutionBwdFilter[dnnResourceSrc] =
        bwdf_bottom_data->get_converted_prv(data.dptr_, false,
          in_data[conv::kData]);
      Storage::Handle addtoWorkspace;
      if (req[1] == kAddTo) {
        // wait mkl support addto mode
        AddToModeAllocAndStoreBuffer(gwmat.dptr_, in_grad[conv::kWeight].Size(), &addtoWorkspace);
      }
      res_convolutionBwdFilter[dnnResourceDiffFilter] = bwdf_filter_diff->get_output_ptr(
        gwmat.dptr_, bwdf_filter_diff, in_grad[conv::kWeight]);
      status = dnnExecute<DType>(convolutionBwdFilter, res_convolutionBwdFilter);
      CHECK_EQ(status, 0) << "Backward Filter conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
      if (bwdf_filter_diff->conversion_needed()) {
        bwdf_filter_diff->convert_from_prv(gwmat.dptr_);
      }
#endif
      if (req[1] == kAddTo) {
        if (bwdf_filter_diff->conversion_needed()) {
          bwdf_filter_diff->convert_from_prv(gwmat.dptr_);
        }
        AddToModeAddAndReleaseBuffer(&addtoWorkspace, gwmat.dptr_, in_grad[conv::kWeight].Size());
      }
    }
    if (!param_.no_bias) {
      // Gradient w.r.t. the bias (always overwrite; no kAddTo emulation here).
      Tensor<xpu, 1, DType> gbias =
        mkl_experimental_direct_get<xpu, 1, DType>(in_grad[conv::kBias], s);
      void *res_convolutionBwdBias[dnnResourceNumber];
      res_convolutionBwdBias[dnnResourceDiffDst] =
        bwdb_top_diff->get_converted_prv(grad.dptr_, true, out_grad[conv::kOut]);
      res_convolutionBwdBias[dnnResourceDiffBias] = bwdb_bias_diff->get_output_ptr(gbias.dptr_,
        bwdb_bias_diff, in_grad[conv::kBias]);
      status = dnnExecute<DType>(convolutionBwdBias, res_convolutionBwdBias);
      CHECK_EQ(status, 0) << "Backward Bias failed with status " << status;
#if MKL_EXPERIMENTAL == 0
      if (bwdb_bias_diff->conversion_needed()) {
        bwdb_bias_diff->convert_from_prv(gbias.dptr_);
      }
#endif
    }
  }

 private:
  ConvolutionParam param_;
  // Geometry captured by LayerSetUp() from the first data/output pair.
  size_t width_,
         height_,
         width_out_,
         height_out_,
         kernel_w_,
         kernel_h_,
         stride_w_,
         stride_h_;
  int group_,
      num_,
      num_output_;
  size_t channels_;
  int pad_w_,
      pad_h_;
  // True once LayerSetUp() has created the primitives and layouts.
  bool init_mkldnn_;
  dnnPrimitive_t convolutionFwd;
  dnnPrimitive_t convolutionBwdData;
  dnnPrimitive_t convolutionBwdFilter;
  dnnPrimitive_t convolutionBwdBias;
  /* Fwd step */
  std::shared_ptr<MKLData<DType> > fwd_bottom_data, fwd_top_data, fwd_filter_data,
       fwd_bias_data;
  /* Bwd data step */
  std::shared_ptr<MKLData<DType> > bwdd_top_diff, bwdd_bottom_diff;
  std::shared_ptr<MKLData<DType> > bwdd_filter_data;
  /* Bwd filter step */
  std::shared_ptr<MKLData<DType> > bwdf_top_diff, bwdf_filter_diff;
  std::shared_ptr<MKLData<DType> > bwdf_bottom_data;
  // NOTE(review): the three members below are never assigned in this
  // class (SetupBuffer() does not create them) -- presumably leftovers.
  std::shared_ptr<MKLData<DType> > bwdf_filter_diff_iter, bwdf2fwd_filter_diff,
       bwdb_bias_diff_iter;
  /* Bwd bias step */
  std::shared_ptr<MKLData<DType> > bwdb_top_diff, bwdb_bias_diff;
};  // class ConvolutionOp
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.