source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
DRB024-simdtruedep-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has data races due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@66:5 vs. a[i]@66:12
*/
#include <stdio.h>
/* DRB024 (simdtruedep): the first loop initializes a[] and b[] in parallel;
 * the second, serial loop carries a true loop dependence (a[i+1] reads a[i]);
 * the third prints the result. Always returns 0. The #pragma lines are part
 * of the benchmark and are preserved verbatim. */
int main(int argc, char * argv[])
{
    int i;
    int len = 100;
    int a[100], b[100];
    /* Initialization: a[i] = i, b[i] = i + 1 (independent iterations). */
    #pragma cetus private(i)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i = 0; i < len; i++)
    {
        a[i] = i;
        b[i] = i + 1;
    }
    /* True dependence: each iteration consumes the previous one's output. */
    #pragma cetus private(i)
    #pragma loop name main#1
    for (i = 0; i < len - 1; i++)
    {
        a[i + 1] = a[i] + b[i];
    }
    /* Report the final contents of a[]. */
    #pragma cetus private(i)
    #pragma loop name main#2
    for (i = 0; i < len; i++)
    {
        printf("i=%d a[%d]=%d\n", i, i, a[i]);
    }
    return 0;
}
|
bug_proxy_task_dep_waiting.c | // RUN: %libomp-compile-and-run
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc, icc-16
// REQUIRES: !abt
#include <stdio.h>
#include <omp.h>
#include <pthread.h>
#include "omp_my_sleep.h"
/*
An explicit task can have a dependency on a target task. If it is not
directly satisfied, the runtime should not wait but resume execution.
*/
// Compiler-generated code (emulation)
// Minimal mirrors of the LLVM OpenMP runtime's internal types -- just enough
// structure to call the __kmpc_* entry points declared below. Layouts must
// match the runtime's kmp.h; do not reorder fields.
typedef long kmp_intptr_t;   // runtime's pointer-sized integer
typedef int kmp_int32;
// NOTE(review): emulates the runtime's 1-byte bool; clashes with <stdbool.h>
// and with C23's bool keyword -- keep this file free of stdbool.
typedef char bool;
// Source-location descriptor passed to every __kmpc_* call (NULL is accepted).
typedef struct ident {
    kmp_int32 reserved_1; /**< might be used in Fortran; see above */
    kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */
    kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */
#if USE_ITT_BUILD
    /* but currently used for storing region-specific ITT */
    /* contextual information. */
#endif /* USE_ITT_BUILD */
    kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */
    char const *psource; /**< String describing the source location.
    The string is composed of semi-colon separated fields which describe the source file,
    the function and a pair of line numbers that delimit the construct.
    */
} ident_t;
// One entry of a depend() list: address + length of the depended-on object,
// plus in/out direction bits.
typedef struct kmp_depend_info {
    kmp_intptr_t base_addr;
    size_t len;
    struct {
        bool in:1;
        bool out:1;
    } flags;
} kmp_depend_info_t;
struct kmp_task;
// Task entry-point signature: (global thread id, task descriptor).
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * );
typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */
    void * shareds; /**< pointer to block of pointers to shared vars */
    kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */
    kmp_int32 part_id; /**< part id for the task */
} kmp_task_t;
#ifdef __cplusplus
extern "C" {
#endif
kmp_int32 __kmpc_global_thread_num ( ident_t * );
kmp_task_t*
__kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags,
size_t sizeof_kmp_task_t, size_t sizeof_shareds,
kmp_routine_entry_t task_entry );
void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task,
kmp_int32 ndeps, kmp_depend_info_t *dep_list,
kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list );
kmp_int32
__kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task );
#ifdef __cplusplus
}
#endif
void *target(void *task)
{
my_sleep( 0.1 );
__kmpc_proxy_task_completed_ooo((kmp_task_t*) task);
return NULL;
}
pthread_t target_thread;
// User's code
/* Body of the proxy task: fires off the detached "device" thread and returns
 * immediately -- the proxy task is completed later by that thread.
 * NOTE(review): pthread_create's return value is deliberately ignored here;
 * on failure the test would hang rather than report -- acceptable for a test. */
int task_entry(kmp_int32 gtid, kmp_task_t *task)
{
    (void)gtid; /* unused; required by kmp_routine_entry_t */
    pthread_create(&target_thread, NULL, &target, task);
    return 0;
}
/* Verifies that an explicit task with an unsatisfied dependence on a proxy
 * (target) task does NOT block the encountering thread: execution must
 * resume immediately, and the dependent tasks run only after the proxy task
 * is completed by the detached thread. Returns 0 on success. */
int main()
{
    int dep;
    /*
     * Corresponds to:
     #pragma omp target nowait depend(out: dep)
     {
         my_sleep( 0.1 );
     }
     */
    // Hand-built dependence descriptor for "dep".
    kmp_depend_info_t dep_info;
    dep_info.base_addr = (long) &dep;
    dep_info.len = sizeof(int);
    // out = inout per spec and runtime expects this
    dep_info.flags.in = 1;
    dep_info.flags.out = 1;
    kmp_int32 gtid = __kmpc_global_thread_num(NULL);
    // flags = 17 -- presumably TIED | PROXY task bits; TODO confirm against
    // the runtime's kmp.h before changing.
    kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
    __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL);
    // Both tasks below depend (directly or transitively) on the proxy task's
    // out-dependence, so neither may have run yet when we reach the checks.
    int first_task_finished = 0;
    #pragma omp task shared(first_task_finished) depend(inout: dep)
    {
        first_task_finished = 1;
    }
    int second_task_finished = 0;
    #pragma omp task shared(second_task_finished) depend(in: dep)
    {
        second_task_finished = 1;
    }
    // check that execution has been resumed and the runtime has not waited
    // for the dependencies to be satisfied.
    int error = (first_task_finished == 1);
    error += (second_task_finished == 1);
    #pragma omp taskwait
    // by now all tasks should have finished
    error += (first_task_finished != 1);
    error += (second_task_finished != 1);
    return error;
}
|
GB_unop__identity_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator element-wise.
// Compiled out (#if 0): per the header above, the apply kernel for the
// identity op is "(none)" -- presumably handled by a plain copy elsewhere;
// TODO confirm against the generator before enabling.
#if 0
GrB_Info GB (_unop_apply__(none))
(
    float *Cx, // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Full/sparse case: every entry of Ax is present.
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the identity unary
// operator. The loop body lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above (GB_CAST_OP etc.).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__identity_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // per-workspace transpose buffers
    const int64_t *restrict A_slice,  // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
MatrixVectorOpt.c | /******************************************************************************
* Example - Matrix-vector multiplication - C/C++ Version
* FILE: MatrixVector.c
* DESCRIPTION:
* This example multiplies matrix A with a vector element and
* stores the summed products in vector c.
******************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
#define SIZE 1000
/*
 * Entry point: fills A with fminf(i/(j+1), j/(i+1)), b with i+1, zeroes c,
 * then times an OpenMP-parallel accumulation into c and prints the elapsed
 * wall-clock time. Returns 0.
 *
 * Fixes vs. original: implicit-int "main()" (invalid since C99) is now
 * "int main(void)" with an explicit return; A (~4 MB) is made static so it
 * no longer risks overflowing the thread stack.
 */
int main(void)
{
    static float A[SIZE][SIZE];   /* 4 MB -- keep off the stack */
    float b[SIZE], c[SIZE];
    int i, j;
    double fTimeStart, fTimeEnd;

    /* Initializations */
    for (i = 0; i < SIZE; i++)
    {
        for (j = 0; j < SIZE; j++)
            /* fminf(x,y) gives the minimum of x and y */
            A[i][j] = fminf(i * 1.0 / (j + 1.0), j * 1.0 / (i + 1.0));
        b[i] = 1.0 * (i + 1);
        c[i] = 0.0;
    }

    fTimeStart = omp_get_wtime();
    /* Each iteration i writes only c[i], so rows are independent. */
    #pragma omp parallel for private(j)
    for (i = 0; i < SIZE; i++)
        for (j = 0; j < SIZE; j++)
            /* NOTE(review): a textbook matrix-vector product would be
             * c[i] += A[i][j] * b[j]; this computes b[i] times the column
             * sum of A (A happens to be symmetric here, but b[i] != b[j]).
             * Preserved as written -- confirm against the original example. */
            c[i] = c[i] + A[j][i] * b[i];
    fTimeEnd = omp_get_wtime();

    printf(" wall clock time = %.20f\n", fTimeEnd - fTimeStart);
    return 0;
}
|
kernel.c | #include "SimpleMOC-kernel_header.h"
/* Kernel driver: inside one parallel region, each thread builds its own
 * state (RNG seed, SIMD scratch vectors, a randomized starting flux vector),
 * then the threads share a dynamically scheduled loop over all segments,
 * attenuating each segment against a randomly chosen source region/interval.
 * PAPI counters, when enabled, are started and stopped per thread. */
void run_kernel( Input * I, Source * S, Table * table)
{
    // Enter Parallel Region
    #pragma omp parallel default(none) shared(I, S, table)
    {
        #ifdef OPENMP
        int thread = omp_get_thread_num();
        #else
        int thread = 0;
        #endif
        // Create Thread Local Random Seed
        // (thread+1 decorrelates threads; seeds differ across runs via time())
        unsigned int seed = time(NULL) * (thread+1);
        // Allocate Thread Local SIMD Vectors (align if using intel compiler)
        #ifdef INTEL
        SIMD_Vectors simd_vecs = aligned_allocate_simd_vectors(I);
        float * state_flux = (float *) _mm_malloc(
            I->egroups * sizeof(float), 64);
        #else
        SIMD_Vectors simd_vecs = allocate_simd_vectors(I);
        float * state_flux = (float *) malloc(
            I->egroups * sizeof(float));
        #endif
        // Allocate Thread Local Flux Vector: random values in [0, 1]
        for( int i = 0; i < I->egroups; i++ )
            state_flux[i] = (float) rand_r(&seed) / RAND_MAX;
        // Initialize PAPI Counters (if enabled); eventset is thread-local,
        // counter_init is serialized because PAPI setup is not thread-safe.
        #ifdef PAPI
        int eventset = PAPI_NULL;
        int num_papi_events;
        #pragma omp critical
        {
            counter_init(&eventset, &num_papi_events, I);
        }
        #endif
        // Enter OMP For Loop over Segments
        #pragma omp for schedule(dynamic,100)
        for( long i = 0; i < I->segments; i++ )
        {
            // Pick Random QSR
            int QSR_id = rand_r(&seed) % I->source_3D_regions;
            // Pick Random Fine Axial Interval
            int FAI_id = rand_r(&seed) % I->fine_axial_intervals;
            // Attenuate Segment
            attenuate_segment( I, S, QSR_id, FAI_id, state_flux,
                &simd_vecs, table);
        }
        // Stop PAPI Counters: thread 0 prints the results header, the
        // barrier ensures it appears before any thread's counter output,
        // then every thread stops its own eventset.
        #ifdef PAPI
        if( thread == 0 )
        {
            printf("\n");
            border_print();
            center_print("PAPI COUNTER RESULTS", 79);
            border_print();
            printf("Count \tSmybol \tDescription\n");
        }
        {
            #pragma omp barrier
        }
        counter_stop(&eventset, num_papi_events, I);
        #endif
    }
}
/* Attenuates one track segment through fine axial interval FAI_id of source
 * region QSR_id using the Method of Characteristics: fits the region's fine
 * source axially (linear at the boundary intervals, quadratic in the
 * interior), integrates the angular flux over the segment, accumulates the
 * tally into the region's scalar flux, and updates state_flux in place.
 *
 *   I          - run parameters (reads egroups, fine_axial_intervals)
 *   S          - source-region array (fine_source, fine_flux, sigT, locks)
 *   QSR_id     - source region crossed by this segment
 *   FAI_id     - fine axial interval within that region
 *   state_flux - per-group angular flux carried along the track (in/out)
 *   simd_vecs  - caller-provided thread-local scratch vectors (len egroups)
 *   table      - exponential lookup table (used only when TABLE is defined)
 */
void attenuate_segment( Input * restrict I, Source * restrict S,
    int QSR_id, int FAI_id, float * restrict state_flux,
    SIMD_Vectors * restrict simd_vecs, Table * restrict table)
{
    // Unload local vector vectors (aliases into thread-local scratch space)
    float * restrict q0 = simd_vecs->q0;
    float * restrict q1 = simd_vecs->q1;
    float * restrict q2 = simd_vecs->q2;
    float * restrict sigT = simd_vecs->sigT;
    float * restrict tau = simd_vecs->tau;
    float * restrict sigT2 = simd_vecs->sigT2;
    float * restrict expVal = simd_vecs->expVal;
    float * restrict reuse = simd_vecs->reuse;
    float * restrict flux_integral = simd_vecs->flux_integral;
    float * restrict tally = simd_vecs->tally;
    float * restrict t1 = simd_vecs->t1;
    float * restrict t2 = simd_vecs->t2;
    float * restrict t3 = simd_vecs->t3;
    float * restrict t4 = simd_vecs->t4;
    // Some placeholder constants - In the full app some of these are
    // calculated based off position in geometry. This treatment
    // shaves off a few FLOPS, but is not significant compared to the
    // rest of the function.
    const float dz = 0.1f;      // axial interval height
    const float zin = 0.3f;     // axial entry point within the interval
    const float weight = 0.5f;  // quadrature weight
    const float mu = 0.9f;      // direction cosine
    const float mu2 = 0.3f;
    const float ds = 0.7f;      // segment length
    const int egroups = I->egroups;
    // load fine source region flux vector
    float * FSR_flux = &S[QSR_id].fine_flux[FAI_id * egroups];
    // Axial source fit: q0/q1/q2 hold the polynomial coefficients evaluated
    // at zin. Bottom interval -> forward-difference linear fit.
    if( FAI_id == 0 )
    {
        float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
        float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
        // cycle over energy groups
        #ifdef INTEL
        #pragma vector
        #elif defined IBM
        #pragma vector_level(10)
        #endif
        for( int g = 0; g < egroups; g++)
        {
            // load neighboring sources
            const float y2 = f2[g];
            const float y3 = f3[g];
            // do linear "fitting"
            const float c0 = y2;
            const float c1 = (y3 - y2) / dz;
            // calculate q0, q1, q2
            q0[g] = c0 + c1*zin;
            q1[g] = c1;
            q2[g] = 0;
        }
    }
    // Top interval -> backward-difference linear fit.
    else if ( FAI_id == I->fine_axial_intervals - 1 )
    {
        float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
        float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
        // cycle over energy groups
        #ifdef INTEL
        #pragma vector
        #elif defined IBM
        #pragma vector_level(10)
        #endif
        for( int g = 0; g < egroups; g++)
        {
            // load neighboring sources
            const float y1 = f1[g];
            const float y2 = f2[g];
            // do linear "fitting"
            const float c0 = y2;
            const float c1 = (y2 - y1) / dz;
            // calculate q0, q1, q2
            q0[g] = c0 + c1*zin;
            q1[g] = c1;
            q2[g] = 0;
        }
    }
    // Interior interval -> three-point quadratic fit.
    else
    {
        float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
        float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
        float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
        // cycle over energy groups
        #ifdef INTEL
        #pragma vector
        #elif defined IBM
        #pragma vector_level(10)
        #endif
        for( int g = 0; g < egroups; g++)
        {
            // load neighboring sources
            const float y1 = f1[g];
            const float y2 = f2[g];
            const float y3 = f3[g];
            // do quadratic "fitting"
            const float c0 = y2;
            const float c1 = (y1 - y3) / (2.f*dz);
            const float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
            // calculate q0, q1, q2
            q0[g] = c0 + c1*zin + c2*zin*zin;
            q1[g] = c1 + 2.f*c2*zin;
            q2[g] = c2;
        }
    }
    // cycle over energy groups
    #ifdef INTEL
    #pragma vector
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        // load total cross section
        sigT[g] = S[QSR_id].sigT[g];
        // calculate common values for efficiency
        tau[g] = sigT[g] * ds;            // optical thickness of the segment
        sigT2[g] = sigT[g] * sigT[g];
    }
    // cycle over energy groups: expVal[g] approximates (1 - exp(-tau[g]))
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        #ifdef TABLE
        expVal[g] = interpolateTable( table, tau[g] );
        #else
        expVal[g] = 1.f - expf( -tau[g] ); // exp is faster on many architectures
        #endif
    }
    // Flux Integral
    // Re-used Term
    // NOTE(review): by C precedence only "2.f * expVal[g]" is divided by
    // (sigT[g] * sigT2[g]); if the whole sum was meant to be divided,
    // parentheses are missing. Preserved as written -- confirm against the
    // derivation / upstream SimpleMOC-kernel source.
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g]
            / (sigT[g] * sigT2[g]);
    }
    //#pragma vector alignednontemporal
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        // add contribution to new source flux
        flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * state_flux[g] - q0[g])
            * expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2
            * (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g])
            / (3.f * sigT2[g] * sigT2[g]);
    }
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        // Prepare tally
        tally[g] = weight * flux_integral[g];
    }
    // Per-interval lock serializes the read-modify-write of FSR_flux,
    // which is shared by every thread crossing this interval.
    #ifdef OPENMP
    omp_set_lock(S[QSR_id].locks + FAI_id);
    #endif
    #ifdef INTEL
    #pragma vector
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        FSR_flux[g] += tally[g];
    }
    #ifdef OPENMP
    omp_unset_lock(S[QSR_id].locks + FAI_id);
    #endif
    // Term 1
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        t1[g] = q0[g] * expVal[g] / sigT[g];
    }
    // Term 2
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g];
    }
    // Term 3
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        t3[g] = q2[g] * mu2 * reuse[g];
    }
    // Term 4
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        t4[g] = state_flux[g] * (1.f - expVal[g]);
    }
    // Total psi: outgoing angular flux carried to the next segment
    #ifdef INTEL
    #pragma vector aligned
    #elif defined IBM
    #pragma vector_level(10)
    #endif
    for( int g = 0; g < egroups; g++)
    {
        state_flux[g] = t1[g] + t2[g] + t3[g] + t4[g];
    }
}
/* Interpolates a formed exponential table to compute ( 1- exp(-x) )
* at the desired x value */
/* Interpolates a precomputed exponential table to approximate (1 - exp(-x))
 * at the given x. Arguments beyond the table's domain saturate to 1.0f.
 * Each table entry is a (slope, intercept) pair stored consecutively. */
float interpolateTable( Table * restrict table, float x)
{
    // check to ensure value is in domain
    if( x > table->maxVal )
        return 1.0f;
    // NOTE(review): the "+ 0.5f * table->dx" term looks like it was meant to
    // be "+ 0.5f" (round-to-nearest bin); preserved as-is -- TODO confirm
    // against the table-construction code.
    const int idx = 2 * (int) ( x / table->dx + 0.5f * table->dx );
    const float slope = table->values[ idx ];
    const float intercept = table->values[ idx + 1 ];
    return slope * x + intercept;
}
|
parallel_master_taskloop_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// Checks that -Wuninitialized still fires inside a taskloop body.
// NOTE: expected-* directives use relative anchors (@+N); do not insert
// lines between a directive and its target statement.
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp parallel master taskloop
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop foo
// Checks the bare directive accepts a following for loop and that a non-loop
// statement after it is diagnosed. (expected-* anchors are relative: @+N.)
void test_no_clause() {
int i;
#pragma omp parallel master taskloop
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel master taskloop' must be a for loop}}
#pragma omp parallel master taskloop
++i;
}
// Checks that branches may not enter or leave the taskloop region: goto to
// an outer label and return are errors, while a goto confined to the region
// is fine. (expected-* anchors are relative; keep line spacing intact.)
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp parallel master taskloop
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Checks diagnostics for unknown tokens, duplicated 'nogroup', and a clause
// ('in_reduction') not allowed on this directive. (Relative @+N anchors.)
void test_invalid_clause() {
int i, a;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp parallel master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp parallel master taskloop nogroup nogroup
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{unexpected OpenMP clause 'in_reduction' in directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop in_reduction(+:a)
for (i = 0; i < 16; ++i)
;
}
// Checks handling of stray non-identifier tokens (';', ',') after the
// directive and its clauses, plus the disallowed 'linear' clause.
// (Relative @+N anchors; keep line spacing intact.)
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop;
for (i = 0; i < 16; ++i)
;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel
#pragma omp parallel master taskloop linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Exhaustive 'collapse' clause checks: malformed parentheses/arguments,
// collapse depth vs. the number of nested for loops, and non-constant or
// non-positive arguments. (Relative @+N anchors; keep line spacing intact.)
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp parallel master taskloop collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// 'private' clause checks: malformed argument lists, non-variable arguments,
// and valid one/two/three-variable uses. (Relative @+N anchors.)
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// 'lastprivate' clause checks: same malformed-argument matrix as 'private',
// plus valid multi-variable uses. (Relative @+N anchors.)
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// 'firstprivate' clause checks: malformed argument lists, plus combined
// lastprivate+firstprivate on the same variables. (Relative @+N anchors.)
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Loop-form checks: float/double iteration variables are rejected, and a
// >64-bit integer induction variable warns about narrowing.
// (Relative @+N anchors; keep line spacing intact.)
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp parallel master taskloop
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/timer-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  Release every OpenCL resource owned by 'info': each recorded cl_event,
  the event array, the semaphore guarding it, and the device-side cl_mem
  buffer (if any), then free the structure itself.  Returns the result of
  RelinquishMagickMemory() (NULL) so callers can reset their pointer in
  one statement.
*/
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
OpenCLCacheInfo *info)
{
ssize_t
i;
for (i=0; i < (ssize_t) info->event_count; i++)
clEnv->library->clReleaseEvent(info->events[i]);
info->events=(cl_event *) RelinquishMagickMemory(info->events);
DestroySemaphoreInfo(&info->events_semaphore);
if (info->buffer != (cl_mem) NULL)
{
/*
  Drop our reference on the device-side pixel buffer.
*/
clEnv->library->clReleaseMemObject(info->buffer);
info->buffer=(cl_mem) NULL;
}
return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
/*
  OpenCL callback that defers freeing the host pixel buffer until every
  event recorded against it has completed.  The event list is scanned from
  newest to oldest; on the first event still executing (execution status
  > CL_COMPLETE, i.e. CL_QUEUED/CL_SUBMITTED/CL_RUNNING) this function
  re-registers itself as that event's CL_COMPLETE callback and returns.
  Only once all events are complete (or errored: negative status) are the
  OpenCL resources and the aligned pixel memory released.
*/
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
void *user_data)
{
MagickCLEnv
clEnv;
OpenCLCacheInfo
*info;
PixelPacket
*pixels;
ssize_t
i;
magick_unreferenced(event);
magick_unreferenced(event_command_exec_status);
info=(OpenCLCacheInfo *) user_data;
clEnv=GetDefaultOpenCLEnv();
for (i=(ssize_t)info->event_count-1; i >= 0; i--)
{
cl_int
event_status;
cl_uint
status;
status=clEnv->library->clGetEventInfo(info->events[i],
CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
{
/*
  Event still in flight: try again when it completes.
*/
clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
&RelinquishPixelCachePixelsDelayed,info);
return;
}
}
/*
  All events finished: release the resource accounting, the OpenCL
  bookkeeping, and finally the host pixel buffer itself.
*/
pixels=info->pixels;
RelinquishMagickResource(MemoryResource,info->length);
(void) RelinquishOpenCLCacheInfo(clEnv,info);
(void) RelinquishAlignedMemory(pixels);
}
/*
  RelinquishOpenCLBuffer() hands the cache's pixels to the delayed OpenCL
  relinquish path (which waits for outstanding events before freeing).
  Returns MagickTrue if an OpenCL buffer was present and scheduled for
  release, MagickFalse if the cache has no OpenCL state.
  Fix: removed the unused local 'clEnv' (declared but never referenced).
*/
static MagickBooleanType RelinquishOpenCLBuffer(
CacheInfo *magick_restrict cache_info)
{
assert(cache_info != (CacheInfo *) NULL);
if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
return(MagickFalse);
RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
return(MagickTrue);
}
/*
  Return a heap-allocated snapshot of the events currently recorded on
  'opencl_info', storing their number in '*event_count'.  The copy is made
  while holding the events semaphore so it is internally consistent.
  Returns NULL (with *event_count set to 0) when there are no events or
  the allocation fails; the caller owns the returned array.
*/
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
cl_uint *event_count)
{
cl_event
*events;
register size_t
i;
assert(opencl_info != (OpenCLCacheInfo *) NULL);
events=(cl_event *) NULL;
LockSemaphoreInfo(opencl_info->events_semaphore);
*event_count=opencl_info->event_count;
if (*event_count > 0)
{
events=AcquireQuantumMemory(*event_count,sizeof(*events));
if (events == (cl_event *) NULL)
*event_count=0;
else
{
for (i=0; i < opencl_info->event_count; i++)
events[i]=opencl_info->events[i];
}
}
UnlockSemaphoreInfo(opencl_info->events_semaphore);
return(events);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
CacheInfo
*magick_restrict cache_info;
MagickCLEnv
clEnv;
assert(image != (const Image *) NULL);
assert(event != (cl_event) NULL);
cache_info=(CacheInfo *)image->cache;
assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
clEnv=GetDefaultOpenCLEnv();
/*
  Take our own reference on the event; if that fails we cannot safely
  record it, so fall back to waiting for it synchronously right here.
*/
if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
{
clEnv->library->clWaitForEvents(1,&event);
return;
}
/*
  Append the event to the cache's event list under the events semaphore,
  growing (or first allocating) the array as needed.
*/
LockSemaphoreInfo(cache_info->opencl->events_semaphore);
if (cache_info->opencl->events == (cl_event *) NULL)
{
cache_info->opencl->events=AcquireMagickMemory(sizeof(
*cache_info->opencl->events));
cache_info->opencl->event_count=1;
}
else
cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
if (cache_info->opencl->events == (cl_event *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
/*
  Defaults: no backing store yet, read/write mode, sRGB, 4 channels,
  no open file descriptor.
*/
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->channels=4;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
/*
  Raise the nexus count to the largest of: the caller's request, the
  OpenMP maximum, and the thread resource limit; never allow zero.
*/
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
/*
  The 'synchronize' flag can come from the environment; the policy value
  (checked second) takes precedence when both are set.
*/
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->width_limit=GetMagickResourceLimit(WidthResource);
cache_info->height_limit=GetMagickResourceLimit(HeightResource);
cache_info->semaphore=AllocateSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AllocateSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
register ssize_t
i;
/*
  A single contiguous arena of 2*number_threads NexusInfo structures is
  allocated: entries [0, number_threads) are the per-thread nexuses and
  entries [number_threads, 2*number_threads) serve as their paired
  virtual nexuses.
*/
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
sizeof(**nexus_info));
if (*nexus_info == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) (2*number_threads); i++)
{
nexus_info[i]=(*nexus_info+i);
/*
  Only the first number_threads entries get a virtual nexus, taken
  from the second half of the arena.
*/
if (i < (ssize_t) number_threads)
nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
MagickSizeType *length,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict info;
/*
  Return a read-only pointer to the image's in-core pixels and store the
  cache length in '*length'.  Only memory- and map-backed caches expose
  their pixels directly; any other cache type yields NULL with a zero
  length.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
info=(CacheInfo *) image->cache;
assert(info->signature == MagickCoreSignature);
(void) exception;
*length=0;
if ((info->type == MemoryCache) || (info->type == MapCache))
{
*length=info->length;
return((const void *) info->pixels);
}
return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
/*
  Instantiate the cache component: lazily allocate the global cache
  semaphore on first call; later calls are no-ops.
*/
if (cache_semaphore != (SemaphoreInfo *) NULL)
return(MagickTrue);
cache_semaphore=AllocateSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
/*
  Tear down the cache component's global semaphore.  If it was never
  allocated, activate it first so DestroySemaphoreInfo() always receives
  a valid semaphore to destroy.
*/
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blend the pixels of 'nexus_info' with the authentic cache pixels,
  weighted by the image's clip mask: wherever the mask intensity is
  non-zero, the nexus pixel is composited over the underlying pixel.
  Returns MagickTrue when clipping succeeded or there was nothing to clip
  (no clip mask, PseudoClass image, or empty region); MagickFalse when the
  cache is missing or the underlying/mask pixels could not be fetched.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**magick_restrict clip_nexus;
register const PixelPacket
*magick_restrict r;
register IndexPacket
*magick_restrict nexus_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
/*
  Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
clip_nexus=AcquirePixelCacheNexus(1);
/*
  p: authentic pixels beneath the nexus region (via the virtual nexus);
  q: the nexus pixels being clipped; r: the clip-mask pixels for the
  same region.
*/
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,
nexus_info->virtual_nexus,exception);
indexes=nexus_info->virtual_nexus->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
double
mask_alpha;
/*
  Either fetch failed: abort, causing a MagickFalse return below.
*/
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
mask_alpha=QuantumScale*GetPixelIntensity(image,r);
if (fabs(mask_alpha) >= MagickEpsilon)
{
/*
  Composite the nexus pixel over the authentic pixel, scaled by the
  normalized mask intensity.
*/
SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
(MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
(MagickRealType) GetPixelOpacity(q)));
SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
(MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
(MagickRealType) GetPixelOpacity(q)));
SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
(MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
(MagickRealType) GetPixelOpacity(q)));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
/*
  An early break (i still below number_pixels) signals failure.
*/
return(i < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
const CacheInfo
*magick_restrict source;
CacheInfo
*magick_restrict target;
/*
  Allocate a fresh pixel cache configured with the same thread count and
  virtual pixel method as the source; pixel data itself is not copied.
*/
assert(cache != NULL);
source=(const CacheInfo *) cache;
assert(source->signature == MagickCoreSignature);
if (source->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source->filename);
target=(CacheInfo *) AcquirePixelCache(source->number_threads);
target->virtual_pixel_method=source->virtual_pixel_method;
return((Cache) target);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*magick_restrict destination,
*magick_restrict source;
/*
  Copy the pixel cache method table from 'cache' into 'clone'.  The
  destination ('clone') is validated and logged first, then the source's
  methods are assigned over.
*/
assert(clone != (Cache) NULL);
destination=(CacheInfo *) clone;
assert(destination->signature == MagickCoreSignature);
if (destination->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
destination->filename);
assert(cache != (Cache) NULL);
source=(CacheInfo *) cache;
assert(source->signature == MagickCoreSignature);
destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Byte-for-byte copy of one disk-backed pixel cache file to another; both
  caches must have identical morphology.  Streams in quantum-sized chunks
  (capped at the source file size when known).  Returns MagickFalse if
  either file cannot be opened/rewound or if fewer bytes than
  cache_info->length were transferred.
  NOTE(review): read()/write() are not retried on EINTR or short writes --
  a short write simply aborts the copy; presumably acceptable here, but
  confirm against callers' expectations.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
(lseek(clone_info->file,0,SEEK_SET) < 0))
return(MagickFalse);
/*
  Choose the transfer chunk size: the configured maximum, reduced to the
  source file size when stat() reports one.
*/
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
/*
  The copy is complete only if exactly the cache's length was written.
*/
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
/*
  Copy the pixel (and, when active, index) data of 'cache_info' into
  'clone_info'.  Fast paths: a PingCache has no pixels (trivial success);
  identical morphology lets memory/map caches memcpy directly and disk
  caches stream file-to-file.  Otherwise rows are copied one at a time
  through per-thread nexuses, truncating or zero-padding rows/columns
  where the two geometries differ.
*/
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*
sizeof(*cache_info->indexes));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
/*
  Per-row copy length: the narrower of the two caches, in bytes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
/*
  Read one source row through this thread's nexus, then write it into
  the clone, zero-filling the destination row first.
*/
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
/*
  Release the image's pixel cache, if one is attached.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->cache == (void *) NULL)
return;
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
CacheInfo
*magick_restrict info;
/*
  Deallocate the image's pixel cache, dispatching through a registered
  destroy-pixel handler when one is installed; otherwise destroy the
  cache directly.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
info=(CacheInfo *) image->cache;
assert(info->signature == MagickCoreSignature);
if (info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
{
image->cache=DestroyPixelCache(image->cache);
return;
}
info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
int
status;
/*
  Close the cache's backing file descriptor and release its file
  resource slot.  Returns MagickFalse when no file was open or close()
  failed, MagickTrue otherwise.
*/
if (cache_info->file == -1)
return(MagickFalse);
status=close(cache_info->file);
cache_info->file=(-1);
RelinquishMagickResource(FileResource,1);
return(status == -1 ? MagickFalse : MagickTrue);
}
/*
  Release the storage behind the pixel cache according to its type
  (aligned memory, memory map, disk file, or distributed server), then
  reset the cache to the Undefined state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
switch (cache_info->type)
{
case MemoryCache:
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  If an OpenCL buffer owns the pixels, hand them to the delayed
  relinquish path instead of freeing them here.
*/
if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
{
cache_info->pixels=(PixelPacket *) NULL;
break;
}
#endif
if (cache_info->mapped == MagickFalse)
cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
cache_info->pixels);
else
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
RelinquishMagickResource(MemoryResource,cache_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
cache_info->pixels=(PixelPacket *) NULL;
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(MapResource,cache_info->length);
}
/*
  fallthrough: a memory-mapped cache is backed by a disk file, so the
  DiskCache case below also runs to close and release it.
*/
case DiskCache:
{
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(DiskResource,cache_info->length);
break;
}
case DistributedCache:
{
*cache_info->cache_filename='\0';
(void) RelinquishDistributePixelCache((DistributeCacheInfo *)
cache_info->server_info);
break;
}
default:
break;
}
cache_info->type=UndefinedCache;
cache_info->mapped=MagickFalse;
cache_info->indexes=(IndexPacket *) NULL;
}
/*
  Drop one reference to the pixel cache; when the last reference is
  released, tear down pixel storage, the distributed-cache connection,
  the per-thread nexuses, and the synchronization primitives.  Always
  returns NULL so callers can overwrite their cache pointer in one step.
*/
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement the reference count under the cache lock; bail out while
    other references remain.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Last reference gone: release pixels and every owned sub-object. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  /* The semaphore was unlocked above; it must not be held when destroyed. */
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature so stale references trip the asserts. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free (or unmap) the staging buffer owned by this nexus and reset all
    of its bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    n;

  /*
    Destroy every per-thread cache nexus (two are allocated per thread),
    then release the NexusInfo storage and the pointer array itself.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    NexusInfo
      *magick_restrict nexus;

    nexus=nexus_info[n];
    if (nexus->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus);
    /* Invalidate the signature so stale references trip the asserts. */
    nexus->signature=(~MagickCoreSignature);
  }
  /* The structs appear to live in one contiguous block anchored at
     nexus_info[0]; a single free covers them all. */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic black-channel/colormap indexes from the last
    queue/get request, delegating to a registered handler when present.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_indexes_from_handler ==
      (GetAuthenticIndexesFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->indexes);
    }
  return(cache_info->methods.get_authentic_indexes_from_handler(image));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return (and retain) an OpenCL buffer wrapping the image's in-memory
  pixels, creating it on first use.  NULL is returned when the cache is
  not a plain (non-mapped) memory cache or buffer creation fails.
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  /* NOTE(review): presumably this gives the image an exclusive,
     initialized cache when it is undefined or shared -- confirm against
     SyncImagePixelCache's contract. */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  /* Only non-mapped heap pixels can back a CL_MEM_USE_HOST_PTR buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /* First request: build the OpenCL wrapper around the host pixels. */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /* Hand the caller its own reference on the buffer. */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict region;

  /*
    Stage the requested region in the nexus, then hydrate it from the
    pixel cache unless the nexus already points directly at cache
    pixels.  NULL on any failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  region=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (region == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache == MagickFalse)
    {
      /* Read pixels first; indexes follow only when the index channel
         is active. */
      if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
        return((PixelPacket *) NULL);
      if ((cache_info->active_index_channel != MagickFalse) &&
          (ReadPixelCacheIndexes(cache_info,nexus_info,exception) ==
            MagickFalse))
        return((PixelPacket *) NULL);
    }
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels from the last queue/get request,
    delegating to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseduoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region, delegating to a registered handler
    when one is installed; otherwise service the request through the
    calling thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  NexusInfo
    *magick_restrict nexus;

  /*
    Default get-authentic-pixels handler: service the request through
    the calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[id];
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  NexusInfo
    *magick_restrict nexus;

  /*
    Report the extent of the region staged by the calling thread's most
    recent queue/get pixel request.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[id];
  return(GetPixelCacheNexusExtent(cache_info,nexus));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
% const cl_event *GetOpenCLEvents(const Image *image,
% cl_command_queue queue)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a copy of the outstanding OpenCL events the next operation
    must wait for; *event_count receives the number of events (0 and a
    NULL result when there is no OpenCL cache state).
  */
  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  cache_info=(CacheInfo *) image->cache;
  *event_count=0;
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_event *) NULL);
  return(CopyOpenCLEvents(cache_info->opencl,event_count));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Verify the image and its pixel cache agree on storage class,
    colorspace, channel count, and geometry, and that the per-thread
    nexus array exists.
  */
  cache_info=(CacheInfo *) image->cache;
  if ((image->storage_class == cache_info->storage_class) &&
      (image->colorspace == cache_info->colorspace) &&
      (image->channels == cache_info->channels) &&
      (image->columns == cache_info->columns) &&
      (image->rows == cache_info->rows) &&
      (cache_info->nexus_info != (NexusInfo **) NULL))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Return a pixel cache this caller may modify, first cloning a shared or
  read-only cache (copy-on-write).  NULL is returned on failure.
*/
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* Throttle CPU use once every 32 calls when a throttle limit is set. */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_epoch=GetMagickTime();
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* Time limit exceeded: close any disk cache and abort fatally. */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      /* Re-check under the cache lock before committing to a clone. */
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy the pixel data too when the caller asked for it. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Install the clone; the old cache is released below. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop this image's reference to the old, shared cache. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          image->type=UndefinedType;
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetPixelCacheType() is a synonym for GetImagePixelCacheType().
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheType
    type;

  /*
    Report the storage backing of the image's pixel cache (memory, map,
    disk, ...).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  type=cache_info->type;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict q;

  /*
    Fetch a single authentic pixel; the background color remains in
    *pixel when the fetch fails.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict q;

  /*
    Default one-pixel handler: preload the background color, then
    overwrite it when the 1x1 region fetch succeeds.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  assert(id < (int) cache_info->number_threads);
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
% MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
% const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const IndexPacket
    *magick_restrict indexes;

  register const PixelPacket
    *magick_restrict p;

  /*
    Fetch one virtual pixel (with its index channel) and expand it into
    a MagickPixelPacket; MagickFalse when the fetch yields no pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  GetMagickPixelPacket(image,pixel);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,p,indexes,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
% MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Fetch one virtual pixel using the caller-supplied virtual-pixel
    method; the background color remains in *pixel on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Fetch one virtual pixel using the image's current virtual-pixel
    method; the background color remains in *pixel on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Default one-pixel handler: read through this thread's private nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;  /* preset fallback color */
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
% A description of each parameter follows:
%
% o channels: GetPixelCacheChannels returns the number of pixel channels.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report the number of channels each pixel in this cache carries.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report the colorspace the cached pixels are stored in.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the file name backing this image's (disk-based) pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default in-core pixel cache handlers;
    any field not assigned below is left zeroed (no handler installed).
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (read-write) pixel accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue, synchronize, and tear-down handlers.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Return the pixel extent (width*height) of the nexus region; if the
    region is empty, fall back to the full cache extent.
  */
  assert(cache != (Cache) NULL);  /* typed NULL cast, matching file style */
  assert(nexus_info != (NexusInfo *) NULL);  /* dereferenced below */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a raw pointer to the cache's pixel store and report its byte
    length; only memory- and map-backed caches expose their pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) exception;  /* validated above; otherwise unused */
  *length=cache_info->length;
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  return((void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report the storage class (DirectClass or PseudoClass) of this cache.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  /*
    Return the preferred square tile dimensions for cache traversal:
    2KB worth of pixels per row for in-core caches, 8KB for disk caches
    (larger tiles amortize disk seeks).
  */
  assert(image != (const Image *) NULL);  /* const-correct cast, matching siblings */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *width=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/sizeof(PixelPacket);
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report how out-of-bounds ("virtual") pixel requests are satisfied.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default handler: indexes from the most recent read on this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the colormap indexes held by the given nexus, or NULL when the
    cache has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((IndexPacket *) NULL);
  return(nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return black-channel/colormap indexes from the last virtual read,
    delegating to an installed handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    return(cache_info->methods.get_virtual_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets used to jitter virtual pixel coordinates.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    position;

  /*
    Jitter x by the dither offset, then clamp into [0,columns-1].
  */
  position=x+DitherMatrix[x & 0x07]-32L;
  if (position < 0L)
    position=0L;
  else
    if (position >= (ssize_t) columns)
      position=(ssize_t) columns-1L;
  return(position);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    position;

  /*
    Jitter y by the dither offset, then clamp into [0,rows-1].
  */
  position=y+DitherMatrix[y & 0x07]-32L;
  if (position < 0L)
    position=0L;
  else
    if (position >= (ssize_t) rows)
      position=(ssize_t) rows-1L;
  return(position);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into the valid column range [0,columns-1] (edge replication).
  */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into the valid row range [0,rows-1] (edge replication).
  */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /*
    Uniformly random column index in [0,columns-1].
  */
  return((ssize_t) (GetPseudoRandomValue(random_info)*columns));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /*
    Uniformly random row index in [0,rows-1].
  */
  return((ssize_t) (GetPseudoRandomValue(random_info)*rows));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  ssize_t
    divisor;

  /*
    Floored division: C's native '/' and '%' truncate toward zero, so for
    negative offsets adjust the pair to keep the remainder in [0,extent-1].
  */
  divisor=(ssize_t) extent;
  modulo.quotient=offset/divisor;
  modulo.remainder=offset % divisor;
  if ((modulo.remainder != 0) && ((offset ^ divisor) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=divisor;
    }
  return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  IndexPacket
    virtual_index;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  PixelPacket
    *magick_restrict pixels,
    virtual_pixel;

  register const IndexPacket
    *magick_restrict virtual_indexes;

  register const PixelPacket
    *magick_restrict p;

  register IndexPacket
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    u,
    v;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the requested region in the nexus; a clip or composite mask
    forces a buffered (non-in-place) nexus.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  /*
    Fast path: when the request lies entirely inside the cache extents the
    pixels (and indexes, for PseudoClass/CMYK) are read directly.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);  /* nexus points at the live cache; no copy needed */
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents.  Choose the constant fill
    color for the methods that synthesize pixels; the remaining methods
    remap coordinates and recurse one pixel at a time below.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  q=pixels;
  indexes=nexus_info->indexes;
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=(IndexPacket) 0;
  /*
    Walk the requested region row by row; within a row, transfer in-bounds
    pixels as contiguous runs and out-of-bounds pixels one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /*
        Longest in-bounds run starting at (x_offset,y_offset).
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* constant-fill methods use the color chosen above */
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              /* clamp to the nearest edge pixel and recurse */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();  /* lazy init */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* wrap both coordinates (floored modulo) to tile the image */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* odd tile quotients flip the coordinate to mirror */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              /* alternate tiles filled with the constant color */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /* tile horizontally only; constant fill above/below image */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /* tile vertically only; constant fill left/right of image */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              /* tile horizontally, clamp vertically */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              /* tile vertically, clamp horizontally */
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;  /* recursive read failed; abort this row */
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
    if (u < (ssize_t) columns)
      break;  /* inner loop aborted early: propagate the failure */
  }
  /*
    Free resources.
  */
  if (v < (ssize_t) rows)
    return((const PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default region handler: read virtual pixels through this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels staged by the most recent virtual read, delegating
    to an installed handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  VirtualPixelMethod
    method;

  /*
    Return an immutable pixel region using the image's current virtual
    pixel method; delegates to an installed handler when one exists.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=GetPixelCacheVirtualMethod(image);  /* pure getter; safe to hoist */
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,method,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const IndexPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels staged in the given cache nexus, or NULL when the
    cache has no defined storage class yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const PixelPacket *) nexus_info->pixels);
  return((PixelPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;

  /*
    Composite pixel p over pixel q using the alpha/beta opacities; the result
    is written to *composite (which may alias p or q).
  */
  if (fabs(alpha-TransparentOpacity) < MagickEpsilon)
    {
      /*
        Source is fully transparent: the composite is just the destination.
      */
      *composite=(*q);
      return;
    }
  gamma=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  /*
    The index channel carries black (K) only when both pixels are CMYK.
  */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickPixelPacket
    alpha,
    beta;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict mask_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    i;

  /*
    Apply the image mask to the pixels staged in nexus_info.  Returns
    MagickTrue when every pixel of the region was masked, MagickFalse on any
    failure (no cache, nexus allocation failure, or unreadable pixels).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);  /* nothing to mask */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  mask_nexus=AcquirePixelCacheNexus(1);
  /*
    BUG FIX: mask_nexus was previously dereferenced (mask_nexus[0]) without
    checking the allocation succeeded; fail cleanly instead.
  */
  if (mask_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    p: authentic pixels of the region; r: the corresponding mask pixels read
    through the freshly acquired nexus; q: the nexus pixels to be updated.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;  /* unreadable region: bail out, reported below */
    SetMagickPixelPacket(image,p,indexes+i,&alpha);
    SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
    /*
      Blend the nexus pixel (beta) with the authentic pixel (alpha), weighted
      by the mask pixel's intensity.
    */
    ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
      alpha.opacity,&beta);
    SetPixelRed(q,ClampToQuantum(beta.red));
    SetPixelGreen(q,ClampToQuantum(beta.green));
    SetPixelBlue(q,ClampToQuantum(beta.blue));
    SetPixelOpacity(q,ClampToQuantum(beta.opacity));
    if (cache_info->active_index_channel != MagickFalse)
      SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
    p++;
    q++;
    r++;
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);  /* loop terminated early */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    fd;

  /*
    Open (or reopen) the disk-backed pixel cache file in the requested mode.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* descriptor already open in the proper mode */
  if (*cache_info->cache_filename == '\0')
    fd=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        fd=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /*
          Try exclusive create first; fall back to opening the existing file.
        */
        fd=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (fd == -1)
          fd=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        fd=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (fd == -1)
          fd=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (fd == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /*
    Release any previously open descriptor before recording the new one.
  */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=fd;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length' bytes from `buffer' to the cache file at `offset'.  Returns
  the number of bytes actually written (the caller compares this against
  `length'), or -1 when the initial seek fails.  Partial writes are retried;
  EINTR is retried, any other error terminates the loop early.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite() we must position the shared file offset first. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Each chunk is clamped to SSIZE_MAX, the largest portable write size. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;  /* do not advance i on a failed write */
        if (errno != EINTR)
          break;  /* hard error: report the bytes written so far */
      }
  }
  return(i);
}
/*
  Extend the disk-backed pixel cache file to at least `length' bytes by
  writing a single byte at length-1 (sparse extension), optionally asking the
  filesystem to pre-allocate the range.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Current file size = seek position at end-of-file. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /* Write one byte at the last requested position to grow the file. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally reserve the blocks now so later writes cannot fail. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent sequential I/O starts at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Allocate and open the pixel cache for `image' in the given map mode.  The
  cache is placed in the first tier that succeeds: heap/anonymous memory,
  a distributed remote cache, a memory-mapped disk file, or a plain disk
  file.  Existing pixels from a prior cache (source_info) are cloned into the
  new cache when the mode permits writing.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;  /* snapshot of the previous cache, for cloning pixels */

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *hosts,
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Enforce configured width/height resource limits. */
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* Keep a copy of the old cache state; its pixels may be cloned below. */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /*
    Round-trip the column count through the total length to detect
    arithmetic overflow in number_pixels*packet_size.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: record geometry only, allocate no pixels. */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /*
    Tier 1: in-memory cache (heap or anonymous mapping), only when the
    length fits in a size_t and the area resource was granted.
  */
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* Policy requires anonymous memory mapping. */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* Allocation failed: restore the previous pixel pointer. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* Carry the old pixels over into the new cache. */
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  /*
    Tier 2: distributed cache, when local disk was denied but remote cache
    hosts are configured.
  */
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* Discard the old cache file; a fresh one is opened below. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /*
    Tier 3: memory-map the disk file when the length fits in a size_t and
    the map resource is granted; otherwise fall back to plain disk I/O.
  */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (PixelPacket *) NULL)
              {
                /* Mapping failed: revert to disk cache. */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->indexes=(IndexPacket *) NULL;
                if (cache_info->active_index_channel != MagickFalse)
                  cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                    number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,
                      format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MaxTextExtent,
                      "open %s (%s[%d], %s, %.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  /* Tier 4: plain disk cache. */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  MagickOffsetType
    rounded_length;

  ssize_t
    page_size;

  /*
    Attach to, or create, a persistent (on-disk) pixel cache at `filename'.
    On success *offset is advanced past the cache, rounded up to a page
    boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      rounded_length=cache_info->length+page_size-
        (cache_info->length % page_size);
      *offset+=rounded_length;
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    The clone mirrors the current cache's geometry but lives on disk in
    PersistMode at the caller-supplied offset.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->active_index_channel=cache_info->active_index_channel;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  clone_info->channels=cache_info->channels;
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  rounded_length=cache_info->length+page_size-(cache_info->length % page_size);
  *offset+=rounded_length;
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *magick_restrict pixels;

  /*
    Thin wrapper: forward every argument unchanged to
    QueueAuthenticPixelCacheNexus().
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,
    nexus_info,exception);
  return(pixels);
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    must_clip;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /*
    The region's origin must lie inside the cache.
  */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  /*
    The region's last pixel must also lie inside the cache.
  */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache; a clip or composite mask forces a private buffer.
  */
  must_clip=((image->clip_mask != (Image *) NULL) ||
    (image->mask != (Image *) NULL)) ? MagickTrue : MagickFalse;
  return(SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    must_clip,nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region through this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sanity-check the image and its attached pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    When no user-installed queue handler is registered, queue through this
    thread's private cache nexus; otherwise delegate to the handler.
  */
  if (cache_info->methods.queue_authentic_pixels_handler ==
      (QueueAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  /*
    Read length bytes from the on-disk pixel cache at byte position offset
    into buffer.  Short reads are retried until length bytes are consumed or
    an unrecoverable error occurs; returns the number of bytes actually read
    (i), which is less than length on error/EOF, or -1 when the initial seek
    fails.
  */
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /*
    No pread(): position the shared descriptor explicitly.  Callers in this
    file hold cache_info->file_semaphore so the seek+read pair is not
    interleaved with other threads.
  */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Each request is clamped to SSIZE_MAX, the largest portable read size. */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* retry only when interrupted by a signal */
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read colormap indexes for the nexus region from the pixel cache into
    nexus_info->indexes.  Returns MagickTrue on success; MagickFalse when
    there is no active index channel, on arithmetic overflow, or on I/O
    failure (exception updated).
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register IndexPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  /*
    Compute the scanline offset and per-row byte length.  Reject arithmetic
    overflow up front (fix: these guards mirror the ones already present in
    ReadPixelCachePixels(); the index path previously had none).
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  if ((length/sizeof(IndexPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict p;

      /*
        Read indexes from memory.  When the region spans full rows the whole
        extent is copied in a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.  The index plane is stored after the
        columns*rows PixelPacket plane in the cache file.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache, one scanline-region per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* The transfer loop exited early: report a read failure. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read PixelPacket data for the nexus region from the pixel cache into
    nexus_info->pixels.  Returns MagickTrue on success; MagickFalse on
    arithmetic overflow or I/O failure (exception updated).

    Fix: the DistributedCache branch passed the mojibake token `®ion`
    instead of `&region` (an address-of expression), which does not compile;
    restored `&region`.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  /*
    Compute the scanline offset and per-row byte length, rejecting
    arithmetic overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.  When the region spans full rows the whole
        extent is copied in a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one row-run per ReadPixelCacheRegion() call.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one scanline-region per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* The transfer loop exited early: report a read failure. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  /*
    Increment the pixel cache's reference count under its semaphore and
    return the cache.
  */
  CacheInfo
    *magick_restrict cache_info;

  /*
    Fix: the assert previously cast NULL to (Cache *) — i.e. void ** — while
    comparing against a Cache (void *).  Cast to the parameter's own type,
    consistent with every other NULL check in this file.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the file-global cache epoch counter to zero.  NOTE(review): the
    declaration of cache_epoch is outside this chunk; presumably called at
    library (re)initialization — confirm against callers.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  /*
    Install the non-NULL handlers from cache_methods into the cache's method
    table; a NULL entry in cache_methods leaves the corresponding existing
    handler untouched.
  */
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    Fix: the guard below previously inspected the cache's *current*
    get_one_virtual_pixel_from_handler instead of the incoming one, so a
    NULL entry in cache_methods could clobber an installed handler.  Guard
    on the incoming handler, consistent with every other field above.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate (or anonymously map) the staging buffer for a cache nexus.
    Returns MagickTrue on success; on failure a ResourceLimitError is
    recorded in exception and MagickFalse is returned.
  */
  if (((MagickSizeType) ((size_t) length)) != length)
    {
      /* Requested size does not fit in size_t on this platform. */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /*
        Anonymous memory mapping was requested.
      */
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (PixelPacket *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /*
        Aligned heap allocation, zero-filled.
      */
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,(size_t) length));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Hint the CPU to prefetch one cache line past the start of the nexus
    pixels; regions smaller than a cache line are skipped.  The read/write
    and locality arguments stay as literals in each call since the prefetch
    primitive may require compile-time constants.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static PixelPacket *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Define the region (x,y,width,height) the cache nexus refers to and return
    a pointer to its pixels.  When the region lies wholly inside an in-memory
    cache (and buffered is false) the nexus aliases the cache pixels
    directly; otherwise the pixels are staged in a private buffer that is
    later synced back to the cache.  Returns NULL on error with exception
    updated.
  */
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct aliasing is possible only when the region is in bounds and is
        contiguous in memory: either a run of full rows (x == 0, full width)
        or a single partial row (height == 1).
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /*
    NOTE(review): the buffer is sized to at least one full cache row or
    column of PixelPackets, not just the region — presumably so the nexus
    can be reused for row-wise transfers; rationale not visible here.
  */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);  /* indexes follow the pixels */
  status=MagickTrue;
  if (nexus_info->cache == (PixelPacket *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small: release and reacquire. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    {
      (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
      return((PixelPacket *) NULL);
    }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  /*
    Enable the image's matte (alpha) channel and set every pixel's opacity
    to the given value, row by row (OpenMP-parallel when available).
    Returns MagickTrue when all rows synced successfully.
  */
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Overwrite the opacity of every pixel in this row. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    prior_method;

  /*
    Install the new virtual-pixel method on the image's cache and return the
    previous setting.  For background/transparent methods on a non-empty
    image, the alpha channel (and, for background, the colorspace) may need
    to be adjusted to honor the method.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  prior_method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns == 0) || (image->rows == 0))
    return(prior_method);
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    {
      if ((image->background_color.opacity != OpaqueOpacity) &&
          (image->matte == MagickFalse))
        (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
      if ((IsPixelGray(&image->background_color) == MagickFalse) &&
          (IsGrayColorspace(image->colorspace) != MagickFalse))
        (void) SetImageColorspace((Image *) image,sRGBColorspace);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      if (image->matte == MagickFalse)
        (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
      break;
    }
    default:
      break;
  }
  return(prior_method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Wait for outstanding OpenCL events on the cache's device buffer, map the
    buffer back into host memory, and release the OpenCL cache info so
    subsequent access uses host pixels.  No-op unless the cache is a
    MemoryCache with an active OpenCL buffer.
  */
  MagickCLEnv
    clEnv;

  assert(cache_info != (CacheInfo *)NULL);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)  /* re-check under lock */
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          /*
            Blocking map (CL_TRUE) waits on the copied events, so the host
            mapping is valid once clEnqueueMapBuffer returns.  The mapped
            address is expected to equal cache_info->pixels (asserted).
          */
          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Ensure all pending OpenCL operations on the image's pixel cache have
    completed and the host memory is up to date; delegates to
    CopyOpenCLBuffer().
  */
  assert(image != (Image *)NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Save the authentic pixels of the given nexus back to the in-memory or
    disk cache, applying any clip/composite masks first.  Returns MagickTrue
    when the region is synced, otherwise MagickFalse.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply the clip mask before committing DirectClass pixels. */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  /* Apply the composite mask as well. */
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus aliases the cache directly: nothing to copy, just taint. */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* The index channel (CMYK black / colormap indexes) is written separately. */
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Sync the calling thread's cache nexus back to the in-memory or disk
    cache; returns MagickTrue when the region is synced.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[thread_id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Save the image pixels to the in-memory or disk cache: delegate to an
    installed sync handler when present, otherwise sync this thread's cache
    nexus directly.  Returns MagickTrue when the region is flushed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler ==
      (SyncAuthenticPixelsHandler) NULL)
    {
      assert(thread_id < (int) cache_info->number_threads);
      return(SyncAuthenticPixelCacheNexus(image,
        cache_info->nexus_info[thread_id],exception));
    }
  return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache by re-acquiring the
    image pixel cache; returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the colormap indexes of the nexus region to the pixel cache
    backing store (memory, memory-map, disk, or distributed cache).
    Returns MagickTrue when every row was written, MagickFalse otherwise.

    Fix: the distributed-cache call passed the mojibake token `®ion`
    (a mis-encoded `&region`); restored the address-of expression.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory.  When the nexus spans full cache rows the
        region is contiguous and collapses to a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.  The index plane follows the pixel plane in
        the cache file, hence the extent*sizeof(PixelPacket) offset bias.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;  /* short write: reported below via y < rows */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one row (or one contiguous
        run) per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the image pixels of the nexus region to the pixel cache backing
    store (memory, memory-map, disk, or distributed cache).  Returns
    MagickTrue when every row was written, MagickFalse otherwise.

    Fixes: the distributed-cache call passed the mojibake token `®ion`
    (a mis-encoded `&region`); restored the address-of expression.  Also
    added the (MagickSizeType) cast on the extent computation to match
    the sibling WritePixelCacheIndexes().
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory.  When the nexus spans full cache rows the
        region is contiguous and collapses to a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk, one row (or one contiguous run) per call.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;  /* short write: reported below via y < rows */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row (or one contiguous
        run) per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
nested-2.c | #include <omp.h>
#include <stdlib.h>
int
main (void)
{
  /*
    Regression test for nested parallelism disabled via omp_set_nested(0):
    the inner parallel region must run with a single thread.  Each value
    encodes thread id + team size * 256.
  */
  int outer = -1, inner = -1;
  omp_set_nested (0);
  omp_set_dynamic (0);
#pragma omp parallel num_threads (4)
  {
#pragma omp single
    {
      outer = omp_get_thread_num () + omp_get_num_threads () * 256;
#pragma omp parallel num_threads (2)
      {
#pragma omp single
        {
          inner = omp_get_thread_num () + omp_get_num_threads () * 256;
        }
      }
    }
  }
  /* Outer team: 4 threads, any thread id 0..3 may execute the single.  */
  if (!(outer >= 4 * 256 && outer < 4 * 256 + 4))
    abort ();
  /* Inner team must be serialized: thread 0 of a team of 1.  */
  if (inner != 256)
    abort ();
  return 0;
}
|
broadcasting.h | /*
* broadcasting.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef BROADCASTING_H_
#define BROADCASTING_H_
#include <dll.h>
#include <sharedmem.h>
#include <shape.h>
#include <op.h>
#include <templatemath.h>
#include <helper_cuda.h>
#include <pairwise_util.h>
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifdef __JNI__
#include <jni.h>
#endif
namespace functions {
namespace broadcast {
/**
* Broadcast operation
* for broadcasting a smaller tensor
* along long a bigger one.
*/
template<typename T>
class Broadcast: public functions::ops::Op<T> {
public:
/**
* Pairwise scalar op applied between an element of the larger tensor
* and the matching element of the broadcast vector.
* @param d1 element from x
* @param d2 element from y
* @return the combined value
*/
virtual
#ifdef __CUDACC__
inline __device__ __host__
#elif defined(__GNUC__)
#endif
T op(T d1, T d2) = 0;
/**
* Unary form of the op.
* @param d1 element from x
* @return the transformed value
*/
virtual
#ifdef __CUDACC__
inline __device__ __host__
#elif defined(__GNUC__)
#endif
T op(T d1) = 0;
#ifdef __CUDACC__
// Device-side broadcast: blocks walk TADs in a block-stride loop; the
// threads of a block cover the elements of one TAD.
__inline__ __device__ void transformCuda(
T *x,
int *xShapeInfo,
T *y,
int *yShapeInfo,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, int *tadOffsets) {
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
// NOTE(review): 'tad' is only referenced by the commented-out path below.
__shared__ shape::TAD *tad;
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
__shared__ int yStride;
// Thread 0 derives the shared TAD geometry once per block.
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
yStride = shape::elementWiseStride(yShapeInfo);
}
// All threads must see the shared geometry before using it.
__syncthreads();
/* __shared__ int rank;
__shared__ int tadEWS;
__shared__ int yStride;
if (threadIdx.x == 0) {
tad = new(manager->getTADSpace()) shape::TAD(); //(xShapeInfo,dimension,dimensionLength)
tad->setExternalBuffers((void *) manager);
tad->initWithExternalTAD(tadOnlyShapeInfo, xShapeInfo, dimension, dimensionLength);
//tad->init(xShapeInfo,dimension,dimensionLength);
//tad->createTadOnlyShapeInfo();
rank = shape::rank(tad->tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tad->tadOnlyShapeInfo);
yStride = shape::elementWiseStride(yShapeInfo);
}
__syncthreads();
*/
//int *xCoord = shape::cuMalloc(manager->getSharedCoordBuffer(), rank);
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
int tadOffsetForBlock = tadOffsets[r];
// Fast path: a positive element-wise stride lets us index linearly.
if(tadEWS > 0) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x) {
// now we need coords for both X, Y. Z is uses the same coord as X in this case
// Y is always vector, however it might be stided
result[tadOffsetForBlock + i * tadEWS] = this->op(x[tadOffsetForBlock + i * tadEWS], y[i * yStride]);
}
}
else {
// Slow path: translate each linear index into coordinates and a raw offset.
int xCoord[MAX_RANK];
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x) {
shape::ind2subC(tadRank,tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
result[xOffset] = this->op(x[xOffset], y[i * yStride]);
}
}
__syncthreads();
}
}
#endif
/**
* CPU execution
* @param x the input
* @param xShapeInfo the x shape information
* @param y the y data
* @param yShapeInfo the y shape information
* @param result the result
* @param resultShapeInfo the result shape information
* @param dimension the dimension to broadcast along long
* @param dimensionLength the length of the dimension buffer
*/
virtual void exec(T *x,
int *xShapeInfo,
T *y,
int *yShapeInfo,
T *result,
int *dimension,
int dimensionLength) {
// Build the TAD (tensor-along-dimension) decomposition of x.
shape::TAD tad(xShapeInfo,dimension,dimensionLength);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
int *tadShapeShapeInfo = tad.tadOnlyShapeInfo;
int tads = tad.numTads;
int *xShape = shape::shapeOf(tadShapeShapeInfo);
int *xStride = shape::stride(tadShapeShapeInfo);
int *resultStride = shape::stride(tadShapeShapeInfo);
// In-place branch: when result aliases x, write through xIter.
if (result == x) {
#pragma omp parallel for schedule(guided)
for (int i = 0; i < tads; i++) {
int offset = tad.tadOffsets[i];
T *xIter = x + offset;
T *resultIter = result + offset;
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int resultStridesIter[MAX_RANK];
int rank = shape::rank(tadShapeShapeInfo);
// Position in the broadcast vector y; advanced by y's element-wise stride.
int vectorIdx = 0;
if (PrepareTwoRawArrayIter<T>(rank,
xShape,
xIter,
xStride,
resultIter,
resultStride,
&rank,
shapeIter,
&xIter,
xStridesIter,
&resultIter,
resultStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter);
{
/* Process the innermost dimension */
T val = this->op(xIter[0], y[vectorIdx]);
// printf("TAD %d x %f and y %f with vector idx %d and result %f\n",i,xIter[0],y[vectorIdx],vectorIdx,val);
xIter[0] = val;
vectorIdx += shape::elementWiseStride(yShapeInfo);
}
ND4J_RAW_ITER_TWO_NEXT(dim,
rank,
coord,
shapeIter,
xIter,
xStridesIter,
resultIter,
resultStridesIter);
}
}
}
else {
// Out-of-place branch: identical traversal but writes go to resultIter.
#pragma omp parallel for schedule(guided)
for (int i = 0; i < tads; i++) {
int offset = tad.tadOffsets[i];
T *xIter = x + offset;
T *resultIter = result + offset;
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int resultStridesIter[MAX_RANK];
int rank = shape::rank(tadShapeShapeInfo);
int vectorIdx = 0;
if (PrepareTwoRawArrayIter<T>(rank,
xShape,
xIter,
xStride,
resultIter,
resultStride,
&rank,
shapeIter,
&xIter,
xStridesIter,
&resultIter,
resultStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter);
{
/* Process the innermost dimension */
T val = this->op(xIter[0], y[vectorIdx]);
resultIter[0] = val;
vectorIdx += shape::elementWiseStride(yShapeInfo);
}
ND4J_RAW_ITER_TWO_NEXT(dim,
rank,
coord,
shapeIter,
xIter,
xStridesIter,
resultIter,
resultStridesIter);
}
}
}
}
// Broadcast ops carry no extra parameters, so aggregation is a no-op.
virtual inline
#ifdef __CUDACC__
__host__ __device__
#endif
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
//no extra params aggregation needs to happen
}
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
virtual ~Broadcast() {
}
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
Broadcast() {
}
};
namespace ops {
template<typename T>
class Add: public functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast addition.
     * @param d1 element from x
     * @param d2 element from y
     * @return d1 + d2
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T sum = d1 + d2;
        return sum;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~Add() {
    }
};
template<typename T>
class Copy: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast copy: the result takes the value of the broadcast vector
     * element, ignoring the destination element.
     * @param d1 element from x (ignored)
     * @param d2 element from y
     * @return d2
     */
    virtual
#ifdef __CUDACC__
    // Fix: added 'inline' to match every sibling op class (Add, Subtract,
    // Multiply, ...); this declaration previously lacked it.
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        return d2;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~Copy() {
    }
};
template<typename T>
class Divide: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast division.
     * @param d1 element from x (numerator)
     * @param d2 element from y (denominator)
     * @return d1 / d2
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T quotient = d1 / d2;
        return quotient;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~Divide() {
    }
};
template<typename T>
class Multiply: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast multiplication.
     * @param d1 element from x
     * @param d2 element from y
     * @return d1 * d2
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T product = d1 * d2;
        return product;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~Multiply() {
    }
};
template<typename T>
class ReverseDivide: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast division with operands swapped.
     * @param d1 element from x (denominator)
     * @param d2 element from y (numerator)
     * @return d2 / d1
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T quotient = d2 / d1;
        return quotient;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~ReverseDivide() {
    }
};
template<typename T>
class ReverseSubtract: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast subtraction with operands swapped.
     * @param d1 element from x (subtrahend)
     * @param d2 element from y (minuend)
     * @return d2 - d1
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T difference = d2 - d1;
        return difference;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~ReverseSubtract() {
    }
};
template<typename T>
class Subtract: public virtual functions::broadcast::Broadcast<T> {
public:
    /**
     * Broadcast subtraction.
     * @param d1 element from x (minuend)
     * @param d2 element from y (subtrahend)
     * @return d1 - d2
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1, T d2) {
        T difference = d1 - d2;
        return difference;
    }
    /**
     * Unary form: identity.
     * @param d1 element from x
     * @return d1 unchanged
     */
    virtual
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T op(T d1) {
        return d1;
    }
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    virtual ~Subtract() {
    }
};
}
template<typename T>
class BroadcastOpFactory {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    BroadcastOpFactory() {
    }
    /**
     * Creates a broadcast operation from its numeric code.
     * @param op the op number to create:
     * 0: Add
     * 1: Subtract
     * 2: Multiply
     * 3: Divide
     * 4: ReverseDivide
     * 5: Reverse Subtract
     * 6: Copy
     * @return the broadcast operation, or nullptr for an unknown code
     */
#ifdef __CUDACC__
    __inline__ __device__
    Broadcast<T> * getOp(int op, unsigned char *buffer) {
#else
    Broadcast<T> * getOp(int op) {
#endif
        switch (op) {
#ifdef __CUDACC__
            // Device side: placement-new into the caller-provided buffer.
            case 0:
                return new(buffer) functions::broadcast::ops::Add<T>();
            case 1:
                return new(buffer) functions::broadcast::ops::Subtract<T>();
            case 2:
                return new(buffer) functions::broadcast::ops::Multiply<T>();
            case 3:
                return new(buffer) functions::broadcast::ops::Divide<T>();
            case 4:
                return new(buffer) functions::broadcast::ops::ReverseDivide<T>();
            case 5:
                return new(buffer) functions::broadcast::ops::ReverseSubtract<T>();
            case 6:
                return new(buffer) functions::broadcast::ops::Copy<T>();
#else
            // Host side: ordinary heap allocation.
            case 0:
                return new functions::broadcast::ops::Add<T>();
            case 1:
                return new functions::broadcast::ops::Subtract<T>();
            case 2:
                return new functions::broadcast::ops::Multiply<T>();
            case 3:
                return new functions::broadcast::ops::Divide<T>();
            case 4:
                return new functions::broadcast::ops::ReverseDivide<T>();
            case 5:
                return new functions::broadcast::ops::ReverseSubtract<T>();
            case 6:
                return new functions::broadcast::ops::Copy<T>();
#endif
            default:
                return nullptr;
        }
    }
};
}
}
#ifdef __CUDACC__
/**
* Meant to be called from an external interface
* and the driver api
* @param opNum the op number to execute
* @param x the input data
* @param xShapeInfo the x shape info for input
* @param y the y to broadcast
* @param yShapeInfo the shape information of the broadcast info
* @param result the result buffer
* @param resultShapeInfo the shape information for the result buffer
* @param dimension the dimension(s) to do broadcast along long
* @param dimensionLength the length of the dimension buffer
* @param gpuInformation the gpu information such as blockdim,griddim and shared
* memory size
*/
template <typename T>
__device__ void broadcastGeneric(
int opNum,
T *x,
int *xShapeInfo,
int xRank,
T *y,
int *yShapeInfo,
int yRank,
T *result,
int *resultShapeInfo,
int zRank,
int *dimension,
int dimensionLength, int *tadOnlyShapeInfo, int *tadOffsets) {
// Device-side dispatcher: builds the op factory and the requested op in
// dynamic shared memory, then runs the broadcast transform.
__shared__ functions::broadcast::Broadcast<T> *op;
__shared__ functions::broadcast::BroadcastOpFactory<T> *newOpFactory;
__shared__ UnifiedSharedMemory *manager;
// Thread 0 carves up the dynamic shared-memory buffer for the block.
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::broadcast::BroadcastOpFactory<T>), sizeof(functions::broadcast::Broadcast<T>), sizeof(shape::TAD), xRank);
}
// Barrier: 'manager' must be visible to all threads before further use.
__syncthreads();
/*
__shared__ int *ptrSharedXShapeInfo;
__shared__ int *ptrSharedYShapeInfo;
__shared__ int *ptrSharedZShapeInfo;
if (xShapeInfo != nullptr) {
shape::sweepShapeInfoBuffer(xShapeInfo, manager->getXShapeBuffer());
if (threadIdx.x == 0) ptrSharedXShapeInfo = manager->getXShapeBuffer();
} else if (threadIdx.x == 0) ptrSharedXShapeInfo = nullptr;
if (yShapeInfo != nullptr) {
shape::sweepShapeInfoBuffer(yShapeInfo, manager->getYShapeBuffer());
if (threadIdx.x == 0) ptrSharedYShapeInfo = manager->getYShapeBuffer();
} else if (threadIdx.x == 0) ptrSharedYShapeInfo = nullptr;
if (resultShapeInfo != nullptr) {
shape::sweepShapeInfoBuffer(resultShapeInfo, manager->getZShapeBuffer());
if (threadIdx.x == 0) ptrSharedZShapeInfo = manager->getZShapeBuffer();
} else if (threadIdx.x == 0) ptrSharedZShapeInfo = nullptr;
*/
// Thread 0 placement-constructs the factory and the op in shared memory.
if(threadIdx.x == 0) {
newOpFactory = new(manager->getFactorySpace()) functions::broadcast::BroadcastOpFactory<T>();
op = newOpFactory->getOp(opNum, manager->getFunctionSpace());
}
// Barrier: every thread needs 'op' constructed before calling it.
__syncthreads();
op->transformCuda(
x,
xShapeInfo,
y,
yShapeInfo,
result,
resultShapeInfo,
dimension,
dimensionLength, manager, tadOnlyShapeInfo, tadOffsets);
}
/**
* Meant to be called from an external interface
* and the driver api
* @param opNum the op number to execute
* @param x the input data
* @param xShapeInfo the x shape info for input
* @param y the y to broadcast
* @param yShapeInfo the shape information of the broadcast info
* @param result the result buffer
* @param resultShapeInfo the shape information for the result buffer
* @param dimension the dimension(s) to do broadcast along long
* @param dimensionLength the length of the dimension buffer
* @param gpuInformation the gpu information such as blockdim,griddim and shared
* memory size
*/
extern "C" __global__ void broadcastDouble(
int opNum,
double *x, int *xShapeInfo, int xRank,
double *y, int *yShapeInfo, int yRank,
double *result, int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength, int *tadOnlyShapeInfo, int *tadOffsets) {
    // Thin kernel entry point: forwards all arguments unchanged to the
    // templated implementation instantiated for double.
    broadcastGeneric<double>(opNum,
                             x, xShapeInfo, xRank,
                             y, yShapeInfo, yRank,
                             result, resultShapeInfo, zRank,
                             dimension, dimensionLength,
                             tadOnlyShapeInfo, tadOffsets);
}
/**
* Meant to be called from an external interface
* and the driver api
* @param opNum the op number to execute
* @param x the input data
* @param xShapeInfo the x shape info for input
* @param y the y to broadcast
* @param yShapeInfo the shape information of the broadcast info
* @param result the result buffer
* @param resultShapeInfo the shape information for the result buffer
* @param dimension the dimension(s) to do broadcast along long
* @param dimensionLength the length of the dimension buffer
* @param gpuInformation the gpu information such as blockdim,griddim and shared
* memory size
*/
extern "C" __global__ void broadcastFloat(
int opNum,
float *x, int *xShapeInfo, int xRank,
float *y, int *yShapeInfo, int yRank,
float *result, int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength, int *tadOnlyShapeInfo, int *tadOffsets) {
    // Thin kernel entry point: forwards all arguments unchanged to the
    // templated implementation instantiated for float.
    broadcastGeneric<float>(opNum,
                            x, xShapeInfo, xRank,
                            y, yShapeInfo, yRank,
                            result, resultShapeInfo, zRank,
                            dimension, dimensionLength,
                            tadOnlyShapeInfo, tadOffsets);
}
#endif
#endif /* BROADCASTING_H_ */
|
convolution_sgemm_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 4u * 4, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = size / 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x12
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
img0 += size * 4;
tmpptr += 48;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
img0 += size * 4;
tmpptr += 32;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
img0 += size * 4;
tmpptr += 16;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x2
v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
__msa_st_w((v4i32)_r01_0, tmpptr, 0);
__msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);
img0 += size * 4;
tmpptr += 8;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
img0 += size * 4;
tmpptr += 4;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
v4f32 _sum1 = _sum0;
v4f32 _sum2 = _sum0;
v4f32 _sum3 = _sum0;
v4f32 _sum4 = _sum0;
v4f32 _sum5 = _sum0;
v4f32 _sum6 = _sum0;
v4f32 _sum7 = _sum0;
v4f32 _sum8 = _sum0;
v4f32 _sum9 = _sum0;
v4f32 _suma = _sum0;
v4f32 _sumb = _sum0;
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 48);
__builtin_prefetch(kptr0 + 16);
v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0);
v4i32 _val89ab = __msa_ld_w(tmpptr + 8, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
_sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
_sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
_suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
_sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);
tmpptr += 12;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);
__msa_st_w((v4i32)_sum8, outptr0 + 4 * 8, 0);
__msa_st_w((v4i32)_sum9, outptr0 + 4 * 9, 0);
__msa_st_w((v4i32)_suma, outptr0 + 4 * 10, 0);
__msa_st_w((v4i32)_sumb, outptr0 + 4 * 11, 0);
outptr0 += 4 * 12;
}
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
v4f32 _sum1 = _sum0;
v4f32 _sum2 = _sum0;
v4f32 _sum3 = _sum0;
v4f32 _sum4 = _sum0;
v4f32 _sum5 = _sum0;
v4f32 _sum6 = _sum0;
v4f32 _sum7 = _sum0;
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 32);
__builtin_prefetch(kptr0 + 16);
v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
tmpptr += 8;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);
outptr0 += 4 * 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
v4f32 _sum1 = _sum0;
v4f32 _sum2 = _sum0;
v4f32 _sum3 = _sum0;
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 16);
__builtin_prefetch(kptr0 + 16);
v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
tmpptr += 4;
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
outptr0 += 4 * 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
v4f32 _sum1 = _sum0;
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 8);
__builtin_prefetch(kptr0 + 16);
v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
v4f32 _val1 = __msa_fill_w_f32(*tmpptr++);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
_sum1 = __msa_fmadd_w(_sum1, _val1, _w0);
kptr0 += 4;
}
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
outptr0 += 4 * 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 4; // inch always > 0
v4f32 _sum = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(tmpptr + 4);
__builtin_prefetch(kptr0 + 16);
v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
kptr0 += 4;
}
__msa_st_w((v4i32)_sum, outptr0, 0);
outptr0 += 4;
}
}
}
// Pack4 im2col + sgemm convolution for MIPS MSA.
// Expands bottom_blob (4-packed float layout) into an im2col workspace, one
// row of pack4 elements per (kernel tap, input channel), then runs the packed
// sgemm kernel to produce top_blob.  `kernel` is assumed to be pre-transformed
// for im2col_sgemm_pack4_msa (produced elsewhere in this file).
static void convolution_im2col_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;      // number of output spatial positions
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    {
        // elements (in floats) skipped at the end of each output row to reach
        // the start of the next strided input row
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // top-left of this (dilated) kernel tap within the input
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            // copy one pack4 element per output position
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            __msa_st_w((v4i32)_val, ptr, 0);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e, semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Channel operations recognized by the channel expression parser.
*/
typedef enum
{
  ExtractChannelOp,   /* bare channel name: write it to the output in order */
  AssignChannelOp,    /* '='  : assign a constant value to a channel */
  ExchangeChannelOp,  /* '<=>': swap two channels */
  TransferChannelOp   /* '=>' : copy one channel to another */
} ChannelFx;
/*
  ChannelImage() writes a single channel of destination_image, row by row.
  For AssignChannelOp the constant `pixel` is stored into destination_channel;
  for every other op the value of source_channel is transferred from
  source_image.  The region processed is clamped to the overlap of the two
  image geometries.  Returns MagickTrue on success, MagickFalse if any row
  could not be read or synced.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* clamp the transfer region to the overlap of the two images */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    /* skip rows whose channels are not active in either image */
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
  ChannelFxImage() applies a channel expression to `image` and returns a new
  image list (caller owns it), or NULL on failure.  The expression grammar is
  described in the header comment above: ',' continues the current output,
  '|' advances the source image, ';' starts a new output image, and the
  operators '<=>', '=>' and '=' exchange, transfer and assign channels.
*/
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  /* a NULL expression is a no-op: return the plain clone */
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* keep writing channels into the current output image */
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* advance to the next source image, wrapping to the first */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* finish the current output image and append a fresh canvas */
        Image
          *canvas;

        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            /* a single extracted channel becomes a grayscale image */
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        GetNextToken(p,&p,MagickPathExtent,token);
        /* reset per-output-image parsing state */
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    /* the current token must name a source channel */
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    GetNextToken(p,&p,MagickPathExtent,token);
    /* operator detection: '<' then '=' then '>' builds <=>, =, or => */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          /* token is a constant value, possibly expressed as a percentage */
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            /* token is the destination channel name */
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        /* grow the pixel to hold a meta channel beyond the current count */
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->read_mask=MagickTrue;
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->write_mask=MagickTrue;
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        /* writing beyond one channel cannot stay grayscale */
        if (((channels >= 1)  || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* an exchange is two transfers: also copy in the reverse direction */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        /* bare channels are written to consecutive output channels */
        channel_mask=(ChannelType) (channel_mask | (1 << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CombineImages() combines one or more grayscale images into a single image:
  the gray value of the i-th image in the sequence is assigned, in order, to
  the i-th meaningful channel of the combined image (typically 1 => Red,
  2 => Green, 3 => Blue, ...).

  Returns a newly allocated image the caller owns, or NULL on failure.
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /* enable alpha when the list holds more images than base channels */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      register ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          /*
            Fix: destroy the per-channel cache view before skipping;
            previously it leaked on every unreadable row.
          */
          image_view=DestroyCacheView(image_view);
          continue;
        }
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelGray(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetImageAlphaChannel() reports whether the image alpha channel is
  activated: MagickFalse for a plain RGB/CMYK image, MagickTrue for
  RGBA/CMYKA.
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SeparateImage() extracts the channels selected by `channel_type` from the
  image and returns them as a single grayscale image (caller owns it), or
  NULL on failure.  When several channel bits are set, later channels in
  pixel order overwrite earlier ones in the gray output.
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* output is single-channel grayscale without alpha; keep the gamma */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* masked-out pixels take the background color instead */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(separate_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(separate_image);
          continue;
        }
      SetPixelChannel(separate_image,GrayPixelChannel,0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* copy only channels selected by channel_type */
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SeparateImage)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SeparateImages() returns one grayscale image per updatable channel of the
  image, collected into an image list.  If no channel qualifies, a single
  separation with UndefinedChannel is returned instead.
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *images;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    /* only channels marked for update are separated */
    if (traits == UndefinedPixelTrait)
      continue;
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_image=SeparateImage(image,(ChannelType) (1 << channel),exception);
    if (channel_image != (Image *) NULL)
      AppendImageToList(&images,channel_image);
  }
  if (images == (Image *) NULL)
    images=SeparateImage(image,UndefinedChannel,exception);
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FlattenPixelInfo() composes pixel p (with opacity `alpha`) over pixel q
  (with opacity `beta`) using the standard "over" operator and writes the
  result, channel by channel, into `composite`.
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;  /* fix: was `Da=...,` — a comma-operator typo that
                            silently merged this statement with the next */
  gamma=Sa*(-Da)+Sa+Da;  /* resulting alpha of the "over" composite */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Enable alpha blending; pixel data is left untouched. */
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha.
      */
      /* Premultiply each updatable color channel by the pixel's alpha. */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          /* Skip pixels excluded by the write mask. */
          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(image);
              continue;
            }
          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            /* Alpha itself is not premultiplied. */
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Replace fully transparent pixels, keeping their transparency. */
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Derive alpha from the image's own intensity. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Keep alpha values but stop blending with them. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha.
      */
      /* Undo premultiplication: divide color channels by alpha. */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(image);
              continue;
            }
          Sa=QuantumScale*GetPixelAlpha(image,q);
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      /* Enable alpha but treat it as an independent channel (no blending). */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Copy alpha into the color channels, then turn alpha off. */
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      /* Disable alpha entirely; pixel data is left untouched. */
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      /* Enable alpha, initializing to opaque if previously undefined. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency.
      */
      /* Flatten each pixel over the background color. */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Note: composite result is written in place over q. */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double)
            GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize alpha when it was previously undefined. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      /*
        Set alpha channel by shape.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=UpdatePixelTrait;
      (void) SetImageMask(image,WritePixelMask,image,exception);
      (void) LevelImageColors(image,&image->background_color,
        &image->background_color,MagickTrue,exception);
      (void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
|
dm.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "dm.kernel_inc.h"
/* One-time initialization for the dm_1st_eqn_right kernel; nothing to do
   for the OpenMP backend.  Returns 0 on success. */
int openmp_dm_1st_eqn_right_init (openmp_pscmc_env * pe ,openmp_dm_1st_eqn_right_struct * kerstr ){
  (void) pe;
  (void) kerstr;
  return 0;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_dm_1st_eqn_right_get_struct_len (size_t * len ){
  *len = sizeof(openmp_dm_1st_eqn_right_struct);
}
/* Compute units available to this kernel = max OpenMP threads. */
int openmp_dm_1st_eqn_right_get_num_compute_units (openmp_dm_1st_eqn_right_struct * kerstr ){
  (void) kerstr;
  return omp_get_max_threads();
}
/* Preferred x-dimension length (index-optimization limit). */
int openmp_dm_1st_eqn_right_get_xlen (void){
  return IDX_OPT_MAX;
}
/*
 * Execute the dm_1st_eqn_right kernel over an xlen-by-ylen index space.
 * Rows (yid) are distributed round-robin across OpenMP threads; for each
 * row the kernel is invoked xlen times (the kernel derives its work item
 * from yid).  Removed the unused ysingle/ymin/ymax block-partition
 * variables left over from an alternative scheduling scheme.
 * Returns 0 on success.
 */
int openmp_dm_1st_eqn_right_exec (openmp_dm_1st_eqn_right_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid++)
      {
        openmp_dm_1st_eqn_right_scmc_kernel(kerstr->phi_out, kerstr->phi_in,
          kerstr->phi_1, kerstr->A1, kerstr->A2, kerstr->A3,
          kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
          kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
          kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
          kerstr->num_ele[0], kerstr->num_ele_A1[0], kerstr->DT[0],
          kerstr->M[0], kerstr->Q[0], kerstr->DM_A[0], yid,
          scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/*
 * Parameter binders for the dm_1st_eqn_right kernel: each stores the device
 * buffer of `pm` into the matching field of `kerstr`.  All return 0.
 * Fix: these functions are declared `int` but previously fell off the end
 * without a return statement (undefined behavior if the caller uses the
 * result).
 */
int openmp_dm_1st_eqn_right_scmc_set_parameter_phi_out (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_out = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_phi_in (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_in = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_phi_1 (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_A1 (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_A2 (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A2 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_A3 (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A3 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_y_cpu_core (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_numvec (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_XLEN (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_YLEN (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_ZLEN (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_ovlp (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_xblock (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_yblock (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_zblock (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_num_ele (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_num_ele_A1 (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele_A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_DT (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DT = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_M (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->M = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_Q (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Q = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_right_scmc_set_parameter_DM_A (openmp_dm_1st_eqn_right_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DM_A = pm->d_data;
  return 0;
}
/* One-time initialization for the dm_1st_eqn_core kernel; nothing to do
   for the OpenMP backend.  Returns 0 on success. */
int openmp_dm_1st_eqn_core_init (openmp_pscmc_env * pe ,openmp_dm_1st_eqn_core_struct * kerstr ){
  (void) pe;
  (void) kerstr;
  return 0;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_dm_1st_eqn_core_get_struct_len (size_t * len ){
  *len = sizeof(openmp_dm_1st_eqn_core_struct);
}
/* Compute units available to this kernel = max OpenMP threads. */
int openmp_dm_1st_eqn_core_get_num_compute_units (openmp_dm_1st_eqn_core_struct * kerstr ){
  (void) kerstr;
  return omp_get_max_threads();
}
/* Preferred x-dimension length (index-optimization limit). */
int openmp_dm_1st_eqn_core_get_xlen (void){
  return IDX_OPT_MAX;
}
/*
 * Execute the dm_1st_eqn_core kernel over an xlen-by-ylen index space.
 * Rows (yid) are distributed round-robin across OpenMP threads.  Removed
 * the unused ysingle/ymin/ymax block-partition variables.
 * Returns 0 on success.
 */
int openmp_dm_1st_eqn_core_exec (openmp_dm_1st_eqn_core_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid++)
      {
        openmp_dm_1st_eqn_core_scmc_kernel(kerstr->phi_out, kerstr->phi_in,
          kerstr->phi_1, kerstr->A1, kerstr->A2, kerstr->A3,
          kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
          kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
          kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
          kerstr->num_ele[0], kerstr->num_ele_A1[0], kerstr->DT[0],
          kerstr->M[0], kerstr->Q[0], kerstr->DM_A[0], yid,
          scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/*
 * Parameter binders for the dm_1st_eqn_core kernel: each stores the device
 * buffer of `pm` into the matching field of `kerstr`.  All return 0.
 * Fix: previously declared `int` with no return statement.
 */
int openmp_dm_1st_eqn_core_scmc_set_parameter_phi_out (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_out = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_phi_in (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_in = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_phi_1 (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_A1 (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_A2 (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A2 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_A3 (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A3 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_y_cpu_core (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_numvec (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_XLEN (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_YLEN (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_ZLEN (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_ovlp (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_xblock (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_yblock (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_zblock (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_num_ele (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_num_ele_A1 (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele_A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_DT (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DT = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_M (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->M = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_Q (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Q = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_core_scmc_set_parameter_DM_A (openmp_dm_1st_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DM_A = pm->d_data;
  return 0;
}
/* One-time initialization for the dm_1st_eqn_fdtd kernel; nothing to do
   for the OpenMP backend.  Returns 0 on success. */
int openmp_dm_1st_eqn_fdtd_init (openmp_pscmc_env * pe ,openmp_dm_1st_eqn_fdtd_struct * kerstr ){
  (void) pe;
  (void) kerstr;
  return 0;
}
/* Report the size of the kernel parameter struct through *len. */
void openmp_dm_1st_eqn_fdtd_get_struct_len (size_t * len ){
  *len = sizeof(openmp_dm_1st_eqn_fdtd_struct);
}
/* Compute units available to this kernel = max OpenMP threads. */
int openmp_dm_1st_eqn_fdtd_get_num_compute_units (openmp_dm_1st_eqn_fdtd_struct * kerstr ){
  (void) kerstr;
  return omp_get_max_threads();
}
/* Preferred x-dimension length (index-optimization limit). */
int openmp_dm_1st_eqn_fdtd_get_xlen (void){
  return IDX_OPT_MAX;
}
/*
 * Execute the dm_1st_eqn_fdtd kernel over an xlen-by-ylen index space.
 * Rows (yid) are distributed round-robin across OpenMP threads.  Removed
 * the unused ysingle/ymin/ymax block-partition variables.
 * Returns 0 on success.
 */
int openmp_dm_1st_eqn_fdtd_exec (openmp_dm_1st_eqn_fdtd_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    for (int yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
      for (int xid = 0; xid < scmc_internal_g_xlen; xid++)
      {
        openmp_dm_1st_eqn_fdtd_scmc_kernel(kerstr->phi_out, kerstr->phi_in,
          kerstr->phi_1, kerstr->A1, kerstr->A2, kerstr->A3,
          kerstr->y_cpu_core[0], kerstr->numvec[0], kerstr->XLEN[0],
          kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
          kerstr->xblock[0], kerstr->yblock[0], kerstr->zblock[0],
          kerstr->num_ele[0], kerstr->num_ele_A1[0], kerstr->DT[0],
          kerstr->M[0], kerstr->Q[0], kerstr->DM_A[0], yid,
          scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/*
 * Parameter binders for the dm_1st_eqn_fdtd kernel: each stores the device
 * buffer of `pm` into the matching field of `kerstr`.  All return 0.
 * Fix: previously declared `int` with no return statement.
 */
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_phi_out (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_out = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_phi_in (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_in = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_phi_1 (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->phi_1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_A1 (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_A2 (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A2 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_A3 (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->A3 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_y_cpu_core (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->y_cpu_core = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_numvec (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->numvec = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_XLEN (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->XLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_YLEN (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->YLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_ZLEN (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ZLEN = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_ovlp (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->ovlp = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_xblock (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->xblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_yblock (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->yblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_zblock (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->zblock = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_num_ele (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_num_ele_A1 (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->num_ele_A1 = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_DT (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DT = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_M (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->M = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_Q (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->Q = pm->d_data;
  return 0;
}
int openmp_dm_1st_eqn_fdtd_scmc_set_parameter_DM_A (openmp_dm_1st_eqn_fdtd_struct * kerstr ,openmp_pscmc_mem * pm ){
  kerstr->DM_A = pm->d_data;
  return 0;
}
|
watchpoint_support.c | //
// WatchPointDriver.cpp
//
//
// Created by Milind Chabbi on 2/21/17.
//
//
#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include <asm/unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <strings.h>
#include <asm/prctl.h>
#include <sys/prctl.h>
#include "common.h"
#include <hpcrun/main.h>
#include <hpcrun/hpcrun_options.h>
#include <hpcrun/write_data.h>
#include <hpcrun/safe-sampling.h>
#include <hpcrun/hpcrun_stats.h>
#include <hpcrun/memory/mmap.h>
#include <hpcrun/cct/cct.h>
#include <hpcrun/metrics.h>
#include <hpcrun/sample_event.h>
#include <hpcrun/sample_sources_registered.h>
#include <hpcrun/thread_data.h>
#include <hpcrun/trace.h>
#include <lush/lush-backtrace.h>
#include <messages/messages.h>
#include <utilities/tokenize.h>
#include <utilities/arch/context-pc.h>
#include <unwind/common/unwind.h>
#include "watchpoint_support.h"
#include <unwind/x86-family/x86-misc.h>
/* Maximum number of hardware debug registers we will ever try to use. */
#define MAX_WP_SLOTS (5)
/* True iff `address` is aligned to `alignment` (alignment must be a power of two). */
#define IS_ALIGNED(address, alignment) (!((size_t)(address) & (alignment - 1)))
/* True iff [addr1, addr1+len1) and [addr2, addr2+len2) intersect. */
#define ADDRESSES_OVERLAP(addr1, len1, addr2, len2) (((addr1) + (len1) > (addr2)) && ((addr2) + (len2) > (addr1)))
#define CACHE_LINE_SIZE (64)
//#define ALT_STACK_SZ (4 * SIGSTKSZ)
/* Alternate signal-stack size: at least 1 MiB, or 4*SIGSTKSZ if that is larger. */
#define ALT_STACK_SZ ((1L << 20) > 4 * SIGSTKSZ ? (1L << 20) : 4 * SIGSTKSZ)
//#define TEST
#ifdef TEST
/* Standalone-test stubs that replace the hpcrun runtime hooks. */
#define EMSG(...) fprintf(stderr, __VA_ARGS__)
#define hpcrun_abort() abort()
#define hpcrun_safe_exit() (1)
#define hpcrun_safe_enter() (1)
#define hpcrun_context_pc(context) (0)
#define get_previous_instruction(ip, pip) (0)
#define get_mem_access_length_and_type(a, b, c) (0)
#endif
/* Prefer in-place breakpoint modification when the kernel supports it. */
#if defined(PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_UPDATE_BREAKPOINT)
#elif defined(PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#define FAST_BP_IOC_FLAG (PERF_EVENT_IOC_MODIFY_ATTRIBUTES)
#else
#endif
/* Evaluate x; on a nonzero result report the errno string and abort. */
#define CHECK(x) ({int err = (x); \
if (err) { \
EMSG("%s: Failed with %d on line %d of file %s\n", strerror(errno), err, __LINE__, __FILE__); \
monitor_real_abort(); }\
err; })
/* Abort with perror(errstr) when val differs from expected. */
#define HANDLE_ERROR_IF_ANY(val, expected, errstr) \
{ \
if (val != expected) \
{ \
perror(errstr); \
abort(); \
} \
}
#define SAMPLES_POST_FULL_RESET_VAL (1)
/* Global watchpoint configuration, filled in by the InitConfig constructor. */
WPConfig_t wpConfig;
//const WatchPointInfo_t dummyWPInfo = {.sample = {}, .startTime =0, .fileHandle= -1, .isActive= false, .mmapBuffer=0};
//const struct DUMMY_WATCHPOINT dummyWP[MAX_WP_SLOTS];
// Data structure that is given by clients to set a WP
// Per-thread bookkeeping for the watchpoint machinery.
// NOTE(review): fields use CACHE_LINE_SZ while this file defines
// CACHE_LINE_SIZE; CACHE_LINE_SZ presumably comes from common.h -- confirm.
typedef struct ThreadData
{
// Dummy perf fd used for LBR bookkeeping; cache-line aligned to avoid false sharing.
int lbrDummyFD __attribute__((aligned(CACHE_LINE_SZ)));
// Alternate signal stack installed for this thread.
stack_t ss;
// Cached FS/GS base addresses (lazily fetched via arch_prctl in IsFSorGS).
void *fs_reg_val;
void *gs_reg_val;
// Statistics counters.
long numWatchpointTriggers;
long numWatchpointImpreciseIP;
long numWatchpointImpreciseAddressArbitraryLength;
long numWatchpointImpreciseAddress8ByteLength;
long numSampleTriggeringWatchpoints;
long numWatchpointDropped;
long numInsaneIP;
// Per-thread PRNG state for replacement decisions.
struct drand48_data randBuffer;
// The active watchpoint slots.
WatchPointInfo_t watchPointArray[MAX_WP_SLOTS];
// Client callback invoked on watchpoint triggers.
WatchPointUpCall_t fptr;
// Padding so the struct ends on its own cache line.
char dummy[CACHE_LINE_SZ];
} ThreadData_t;
static __thread ThreadData_t tData;
/* Is `addr` inside this thread's alternate signal stack? */
bool IsAltStackAddress(void *addr)
{
    char *base = (char *)tData.ss.ss_sp;
    return ((char *)addr >= base) && ((char *)addr < base + tData.ss.ss_size);
}
/*
 * Is `addr` inside the first page of this thread's FS or GS segment
 * (i.e., thread-local storage)?  The segment bases are fetched lazily
 * via arch_prctl on first call and cached in tData.
 */
bool IsFSorGS(void *addr)
{
    if (tData.fs_reg_val == (void *)-1)
    {
        // First call on this thread: query both segment bases at once.
        syscall(SYS_arch_prctl, ARCH_GET_FS, &tData.fs_reg_val);
        syscall(SYS_arch_prctl, ARCH_GET_GS, &tData.gs_reg_val);
    }
    // 4096 = smallest page size; treat one page past each base as in-segment.
    char *probe = (char *)addr;
    char *fsBase = (char *)tData.fs_reg_val;
    if (fsBase <= probe && probe < fsBase + 4096)
        return true;
    char *gsBase = (char *)tData.gs_reg_val;
    if (gsBase <= probe && probe < gsBase + 4096)
        return true;
    return false;
}
/********* OS SUPPORT ****************/
// perf-util.h has it
/* Thin wrapper over the perf_event_open(2) syscall (glibc exposes no stub). */
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
{
    long ret = syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
    return ret;
}
/* Kernel thread id of the calling thread (glibc < 2.30 has no gettid()). */
static pid_t gettid()
{
    pid_t tid = (pid_t)syscall(__NR_gettid);
    return tid;
}
/* Arm the perf event behind `fd`; CHECK aborts the process on ioctl failure. */
static inline void EnableWatchpoint(int fd)
{
// Start the event
CHECK(ioctl(fd, PERF_EVENT_IOC_ENABLE, 0));
}
/*
 * Stop the perf event backing `wpi` and mark the slot inactive.
 * The file descriptor and mmap buffer are kept for later re-arming.
 * Cleanup: removed an unused `pthread_t ptid` local (only referenced by
 * commented-out debug output) and the dead fprintf.
 */
static inline void DisableWatchpoint(WatchPointInfo_t *wpi)
{
    // Stop the event
    assert(wpi->fileHandle != -1);
    CHECK(ioctl(wpi->fileHandle, PERF_EVENT_IOC_DISABLE, 0));
    wpi->isActive = false;
}
/*
 * Map the perf ring buffer for `fd`: one metadata page plus one data page.
 * Aborts the process if mmap fails.
 */
static void *MAPWPMBuffer(int fd)
{
    size_t length = 2 * wpConfig.pgsz;
    void *buf = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (MAP_FAILED == buf)
    {
        EMSG("Failed to mmap : %s\n", strerror(errno));
        monitor_real_abort();
    }
    return buf;
}
/* Unmap a ring buffer previously returned by MAPWPMBuffer. */
static void UNMAPWPMBuffer(void *buf)
{
    size_t length = 2 * wpConfig.pgsz;
    CHECK(munmap(buf, length));
}
static int OnWatchPoint(int signum, siginfo_t *info, void *context);
/*
 * Returns true iff environment variable `name` is set to "1" or "true"
 * (case-insensitive).  Unset or any other value yields false.
 */
static bool EnvFlagIsSet(const char *name)
{
    const char *v = getenv(name);
    if (v == NULL)
        return false;
    return (0 == strcasecmp(v, "1")) || (0 == strcasecmp(v, "true"));
}

/*
 * Library constructor: probes the kernel for hardware-breakpoint support,
 * installs the watchpoint signal handler, counts the usable debug
 * registers, and reads the HPCRUN_WP_* environment overrides into
 * wpConfig.
 *
 * Fixes over the original:
 *   - close(fd) was executed unconditionally, so the LBR-probe failure
 *     path called close(-1), whose error made CHECK abort the process.
 *   - HPCRUN_WP_DONT_FIX_IP / HPCRUN_WP_DONT_DISASSEMBLE_TRIGGER_ADDRESS
 *     were parsed with two independent `if`s followed by an `else`; the
 *     value "1" set the flag and the second test's `else` immediately
 *     reset it.  Parsing now goes through EnvFlagIsSet.
 */
__attribute__((constructor)) static void InitConfig()
{
    tData.fptr = NULL;
    volatile int dummyWP[MAX_WP_SLOTS];
    // Probe: can we open a breakpoint event with sampling (needed for LBR)?
    wpConfig.isLBREnabled = true;
    struct perf_event_attr peLBR = {
        .type = PERF_TYPE_BREAKPOINT,
        .size = sizeof(struct perf_event_attr),
        .bp_type = HW_BREAKPOINT_W,
        .bp_len = HW_BREAKPOINT_LEN_1,
        .bp_addr = (uintptr_t)&dummyWP[0],
        .sample_period = 1,
        .precise_ip = 3 /* arbitrary skid */,
        .sample_type = PERF_SAMPLE_ADDR | PERF_SAMPLE_IP
                     | PERF_SAMPLE_PERIOD | PERF_SAMPLE_TIME,
        .exclude_user = 0,
        .exclude_kernel = 1,
        .exclude_hv = 1,
        .disabled = 0, /* enabled */
    };
    int fd = perf_event_open(&peLBR, 0, -1, -1 /*group*/, 0);
    if (fd != -1)
    {
        fprintf(stderr, "isLBREnabled=true\n");
        wpConfig.isLBREnabled = true;
        // BUGFIX: close only a valid descriptor; close(-1) fails and CHECK aborts.
        CHECK(close(fd));
    }
    else
    {
        fprintf(stderr, "isLBREnabled=false\n");
        wpConfig.isLBREnabled = false;
    }
#if defined(FAST_BP_IOC_FLAG)
    wpConfig.isWPModifyEnabled = true;
#else
    wpConfig.isWPModifyEnabled = false;
#endif
    //wpConfig.signalDelivered = SIGTRAP;
    //wpConfig.signalDelivered = SIGIO;
    //wpConfig.signalDelivered = SIGUSR1;
    wpConfig.signalDelivered = SIGRTMIN + 3;
    // Setup the signal handler: block everything else while handling.
    sigset_t block_mask;
    sigfillset(&block_mask);
    struct sigaction sa1 = {
        .sa_sigaction = OnWatchPoint,
        .sa_mask = block_mask,
        .sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER | SA_ONSTACK};
    if (monitor_sigaction(wpConfig.signalDelivered, OnWatchPoint, 0 /*flags*/, &sa1) == -1)
    {
        fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
        monitor_real_abort();
    }
    wpConfig.pgsz = sysconf(_SC_PAGESIZE);
    // Identify max WPs supported by the architecture: open breakpoint
    // events until the kernel refuses one.
    volatile int wpHandles[MAX_WP_SLOTS];
    int i = 0;
    for (; i < MAX_WP_SLOTS; i++)
    {
        struct perf_event_attr pe = {
            .type = PERF_TYPE_BREAKPOINT,
            .size = sizeof(struct perf_event_attr),
            .bp_type = HW_BREAKPOINT_W,
            .bp_len = HW_BREAKPOINT_LEN_1,
            .bp_addr = (uintptr_t)&dummyWP[i],
            .sample_period = 1,
            .precise_ip = 2 /* arbitrary skid */,
            .sample_type = 0,
            .exclude_user = 0,
            .exclude_kernel = 1,
            .exclude_hv = 1,
            .disabled = 0, /* enabled */
        };
        wpHandles[i] = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
        if (wpHandles[i] == -1)
        {
            break;
        }
    }
    if (i == 0)
    {
        fprintf(stderr, "Cannot create a single watch point\n");
        monitor_real_abort();
    }
    for (int j = 0; j < i; j++)
    {
        CHECK(close(wpHandles[j]));
    }
    wpConfig.maxWP = i;
    // Should we get the floating point type in an access?
    wpConfig.getFloatType = false;
    // Get the replacement scheme (default AUTO).
    char *replacementScheme = getenv("HPCRUN_WP_REPLACEMENT_SCHEME");
    if (replacementScheme)
    {
        if (0 == strcasecmp(replacementScheme, "AUTO"))
        {
            wpConfig.replacementPolicy = AUTO;
        }
        else if (0 == strcasecmp(replacementScheme, "OLDEST"))
        {
            wpConfig.replacementPolicy = OLDEST;
        }
        else if (0 == strcasecmp(replacementScheme, "NEWEST"))
        {
            wpConfig.replacementPolicy = NEWEST;
        }
        else
        {
            // default;
            wpConfig.replacementPolicy = AUTO;
        }
    }
    else
    {
        // default;
        wpConfig.replacementPolicy = AUTO;
    }
    // Should we fix IP off by one?  (BUGFIX: broken if/if/else chain.)
    wpConfig.dontFixIP = EnvFlagIsSet("HPCRUN_WP_DONT_FIX_IP");
    // Should we get the address in a WP trigger?  (BUGFIX: same chain bug.)
    wpConfig.dontDisassembleWPAddress = EnvFlagIsSet("HPCRUN_WP_DONT_DISASSEMBLE_TRIGGER_ADDRESS");
}
/* Per-client wpConfig overrides, applied after InitConfig. */
/* RedSpy needs the floating-point type of each access. */
void RedSpyWPConfigOverride(void *v)
{
wpConfig.getFloatType = true;
}
/* LoadSpy needs the floating-point type of each access. */
void LoadSpyWPConfigOverride(void *v)
{
wpConfig.getFloatType = true;
}
void FalseSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void TrueSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void AllSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCFalseSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCTrueSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
void IPCAllSharingWPConfigOverride(void *v)
{
// replacement policy is OLDEST forced.
wpConfig.replacementPolicy = OLDEST;
}
/* Reuse clients sample raw trigger IPs/addresses, so skip the fix-ups. */
void TemporalReuseWPConfigOverride(void *v)
{
// dont fix IP
wpConfig.dontFixIP = true;
wpConfig.dontDisassembleWPAddress = true;
}
void SpatialReuseWPConfigOverride(void *v)
{
// dont fix IP
wpConfig.dontFixIP = true;
wpConfig.dontDisassembleWPAddress = true;
}
// Arm one hardware watchpoint described by sampleData into slot wpi.
// When `modify` is true and FAST_BP_IOC_FLAG is available, the existing
// perf event is re-programmed in place via ioctl; otherwise a fresh
// perf_event fd is opened, switched to async signal delivery targeted at
// this thread, and (if LBR is enabled) its ring buffer is mmapped.
// Aborts the process on unsupported lengths or perf_event_open failure.
static void CreateWatchPoint(WatchPointInfo_t *wpi, SampleData_t *sampleData, bool modify)
{
// Perf event settings
struct perf_event_attr pe = {
.type = PERF_TYPE_BREAKPOINT,
.size = sizeof(struct perf_event_attr),
// .bp_type = HW_BREAKPOINT_W,
// .bp_len = HW_BREAKPOINT_LEN_4,
.sample_period = 1,
.precise_ip = 2,
.sample_type = (PERF_SAMPLE_IP),
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.disabled = 0, /* enabled */
};
// Map the requested watch length onto the kernel's breakpoint-length enum.
switch (sampleData->wpLength)
{
case 1:
pe.bp_len = HW_BREAKPOINT_LEN_1;
break;
case 2:
pe.bp_len = HW_BREAKPOINT_LEN_2;
break;
case 4:
pe.bp_len = HW_BREAKPOINT_LEN_4;
break;
case 8:
pe.bp_len = HW_BREAKPOINT_LEN_8;
break;
default:
EMSG("Unsupported .bp_len %d: %s\n", wpi->sample.wpLength, strerror(errno));
monitor_real_abort();
}
pe.bp_addr = (uintptr_t)sampleData->va;
// Read, write, or read-write breakpoint as requested by the sample.
switch (sampleData->type)
{
case WP_READ:
pe.bp_type = HW_BREAKPOINT_R;
break;
case WP_WRITE:
pe.bp_type = HW_BREAKPOINT_W;
break;
default:
pe.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
}
#if defined(FAST_BP_IOC_FLAG)
if (modify)
{
// modification: reuse the existing fd/mmap; just re-program the breakpoint.
assert(wpi->fileHandle != -1);
assert(wpi->mmapBuffer != 0);
//DisableWatchpoint(wpi);
CHECK(ioctl(wpi->fileHandle, FAST_BP_IOC_FLAG, (unsigned long)(&pe)));
//if(wpi->isActive == false) {
//EnableWatchpoint(wpi->fileHandle);
//}
}
else
#endif
{
// fresh creation
// Create the perf_event for this thread on all CPUs with no event group
int perf_fd = perf_event_open(&pe, 0, -1, -1 /*group*/, 0);
if (perf_fd == -1)
{
EMSG("Failed to open perf event file: %s\n", strerror(errno));
monitor_real_abort();
}
// Set the perf_event file to async mode
CHECK(fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL, 0) | O_ASYNC));
// Tell the file to send a signal when an event occurs
CHECK(fcntl(perf_fd, F_SETSIG, wpConfig.signalDelivered));
// Deliver the signal to this thread (F_OWNER_TID targets one thread, not the group).
struct f_owner_ex fown_ex;
fown_ex.type = F_OWNER_TID;
fown_ex.pid = gettid();
int ret = fcntl(perf_fd, F_SETOWN_EX, &fown_ex);
if (ret == -1)
{
// NOTE(review): this early return leaves perf_fd open and wpi
// unpopulated — looks like a leaked fd; confirm intent.
EMSG("Failed to set the owner of the perf event file: %s\n", strerror(errno));
return;
}
// CHECK(fcntl(perf_fd, F_SETOWN, gettid()));
wpi->fileHandle = perf_fd;
// mmap the file if lbr is enabled
if (wpConfig.isLBREnabled)
{
wpi->mmapBuffer = MAPWPMBuffer(perf_fd);
}
}
// Record the armed state for later trigger matching and victim selection.
wpi->isActive = true;
//fprintf(stderr, "tid=%d wpi->isActive=true wpi=%p wpi->mmapBuffer=%p (CreateWatchPoint)\n", syscall(SYS_gettid), wpi, wpi->mmapBuffer);
wpi->va = (void *)pe.bp_addr;
wpi->sample = *sampleData;
wpi->startTime = rdtsc();
}
/* create a dummy PERF_TYPE_HARDWARE event that will never fire */
// Create a PERF_TYPE_HARDWARE event with a huge sample period so it never
// fires; its fd is kept in tData.lbrDummyFD. Used as a Linux workaround when
// LBR (branch-stack sampling) is enabled. Aborts on perf_event_open failure.
static void CreateDummyHardwareEvent(void)
{
// Perf event settings
struct perf_event_attr pe = {
.type = PERF_TYPE_HARDWARE,
.size = sizeof(struct perf_event_attr),
.config = PERF_COUNT_HW_CACHE_MISSES,
.sample_period = 0x7fffffffffffffff, /* some insanely large sample period */
.precise_ip = 2,
.sample_type = PERF_SAMPLE_BRANCH_STACK,
.exclude_user = 0,
.exclude_kernel = 1,
.exclude_hv = 1,
.branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
};
// Create the perf_event for this thread on all CPUs with no event group
int perf_fd = perf_event_open(&pe, 0, -1, -1, 0);
if (perf_fd == -1)
{
EMSG("Failed to open perf event file: %s\n", strerror(errno));
monitor_real_abort();
}
tData.lbrDummyFD = perf_fd;
}
// Close the never-firing dummy hardware event's fd (see CreateDummyHardwareEvent).
static void CloseDummyHardwareEvent(int perf_fd)
{
CHECK(close(perf_fd));
}
/*********** Client interfaces *******/
// Fully tear down a watchpoint slot: unmap its ring buffer (if any), close
// its perf fd, and mark the slot inactive. Requires a valid fileHandle.
static void DisArm(WatchPointInfo_t *wpi)
{
//fprintf(stderr, "tid=%d wpi->isActive=false wpi=%p wpi->mmapBuffer=%p (DisArm)\n", syscall(SYS_gettid), wpi, wpi->mmapBuffer);
// assert(wpi->isActive);
assert(wpi->fileHandle != -1);
if (wpi->mmapBuffer)
UNMAPWPMBuffer(wpi->mmapBuffer);
wpi->mmapBuffer = 0;
CHECK(close(wpi->fileHandle));
wpi->fileHandle = -1;
wpi->isActive = false;
}
// Arm (or re-arm) a watchpoint slot with the given sample. Fast path: if the
// kernel supports in-place WP modification and the slot already has an fd,
// re-program it. Slow path: tear down any active WP and create a fresh one.
// Always returns true (creation failures abort inside CreateWatchPoint).
static bool ArmWatchPoint(WatchPointInfo_t *wpi, SampleData_t *sampleData)
{
// if WP modification is suppoted use it
if (wpConfig.isWPModifyEnabled)
{
// Does not matter whether it was active or not.
// If it was not active, enable it.
if (wpi->fileHandle != -1)
{
CreateWatchPoint(wpi, sampleData, true);
return true;
}
}
// disable the old WP if active
if (wpi->isActive)
{
DisArm(wpi);
}
CreateWatchPoint(wpi, sampleData, false);
return true;
}
// Per thread initialization
// Initialize per-thread watchpoint state: install an alternate signal stack
// (so WP signals can be handled even on stack overflow paths), register the
// client upcall `func`, seed the per-thread RNG, zero trigger statistics,
// mark all WP slots empty, and create the LBR dummy event if LBR is enabled.
// Aborts the process if the alternate stack cannot be allocated/installed.
void WatchpointThreadInit(WatchPointUpCall_t func)
{
tData.ss.ss_sp = malloc(ALT_STACK_SZ);
if (tData.ss.ss_sp == NULL)
{
EMSG("Failed to malloc ALT_STACK_SZ");
monitor_real_abort();
}
tData.ss.ss_size = ALT_STACK_SZ;
tData.ss.ss_flags = 0;
if (sigaltstack(&tData.ss, NULL) == -1)
{
EMSG("Failed sigaltstack");
monitor_real_abort();
}
tData.lbrDummyFD = -1;
tData.fptr = func;
// (void*)-1 is the "unset" sentinel for the segment-register snapshots.
tData.fs_reg_val = (void *)-1;
tData.gs_reg_val = (void *)-1;
srand48_r(time(NULL), &tData.randBuffer);
// Reset all per-thread trigger statistics.
tData.numWatchpointTriggers = 0;
tData.numWatchpointImpreciseIP = 0;
tData.numWatchpointImpreciseAddressArbitraryLength = 0;
tData.numWatchpointImpreciseAddress8ByteLength = 0;
tData.numWatchpointDropped = 0;
tData.numSampleTriggeringWatchpoints = 0;
tData.numInsaneIP = 0;
// All WP slots start inactive with full replacement probability.
for (int i = 0; i < wpConfig.maxWP; i++)
{
tData.watchPointArray[i].isActive = false;
tData.watchPointArray[i].fileHandle = -1;
tData.watchPointArray[i].startTime = 0;
tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
}
//if LBR is supported create a dummy PERF_TYPE_HARDWARE for Linux workaround
if (wpConfig.isLBREnabled)
{
CreateDummyHardwareEvent();
}
}
// Tear down per-thread watchpoint state: disarm all slots, close the LBR
// dummy event, and flush the per-thread trigger statistics into the global
// hpcrun counters. The alternate signal stack is deliberately leaked
// (see the #if 0 block below).
void WatchpointThreadTerminate()
{
fprintf(stderr, "WatchpointThreadTerminate, disable watchpoint in a group\n");
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (tData.watchPointArray[i].fileHandle != -1)
{
DisArm(&tData.watchPointArray[i]);
}
}
if (tData.lbrDummyFD != -1)
{
CloseDummyHardwareEvent(tData.lbrDummyFD);
tData.lbrDummyFD = -1;
}
tData.fs_reg_val = (void *)-1;
tData.gs_reg_val = (void *)-1;
// Publish per-thread counters to the global statistics.
hpcrun_stats_num_watchpoints_triggered_inc(tData.numWatchpointTriggers);
hpcrun_stats_num_watchpoints_imprecise_inc(tData.numWatchpointImpreciseIP);
hpcrun_stats_num_watchpoints_imprecise_address_inc(tData.numWatchpointImpreciseAddressArbitraryLength);
hpcrun_stats_num_watchpoints_imprecise_address_8_byte_inc(tData.numWatchpointImpreciseAddress8ByteLength);
hpcrun_stats_num_insane_ip_inc(tData.numInsaneIP);
hpcrun_stats_num_watchpoints_dropped_inc(tData.numWatchpointDropped);
hpcrun_stats_num_sample_triggering_watchpoints_inc(tData.numSampleTriggeringWatchpoints);
#if 0
tData.ss.ss_flags = SS_DISABLE;
if (sigaltstack(&tData.ss, NULL) == -1){
EMSG("Failed sigaltstack WatchpointThreadTerminate");
// no need to abort , just leak the memory
// monitor_real_abort();
} else {
if(tData.ss.ss_sp)
free(tData.ss.ss_sp);
}
#endif
}
// Finds a victim slot to set a new WP
// Choose a WP slot for a new watchpoint, per `policy`. Returns:
//   EMPTY_SLOT     - an inactive slot was found (index in *location);
//   NON_EMPTY_SLOT - an active slot was chosen for replacement;
//   NONE_AVAILABLE - the caller should not replace (though *location still
//                    holds a candidate under AUTO, for clients that force).
static VictimType GetVictim(int *location, ReplacementPolicy policy)
{
// If any WP slot is inactive, return it;
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (!tData.watchPointArray[i].isActive)
{
*location = i;
// Increase samplePostFull for those who survived.
for (int rest = 0; rest < wpConfig.maxWP; rest++)
{
if (tData.watchPointArray[rest].isActive)
{
tData.watchPointArray[rest].samplePostFull++;
}
}
return EMPTY_SLOT;
}
}
switch (policy)
{
case AUTO:
{
// Reservoir-like scheme: visit slots in random order; each slot is
// replaced with probability 1/(1+samplePostFull), so long-surviving
// watchpoints become progressively harder to evict.
// Shuffle the visit order
int slots[MAX_WP_SLOTS];
for (int i = 0; i < wpConfig.maxWP; i++)
slots[i] = i;
// Shuffle
for (int i = 0; i < wpConfig.maxWP; i++)
{
long int randVal;
lrand48_r(&tData.randBuffer, &randVal);
randVal = randVal % wpConfig.maxWP;
int tmp = slots[i];
slots[i] = slots[randVal];
slots[randVal] = tmp;
}
// attempt to replace each WP with its own probability
for (int i = 0; i < wpConfig.maxWP; i++)
{
int loc = slots[i];
double probabilityToReplace = 1.0 / (1.0 + (double)tData.watchPointArray[loc].samplePostFull);
double randValue;
drand48_r(&tData.randBuffer, &randValue);
// update tData.samplePostFull
tData.watchPointArray[loc].samplePostFull++;
if (randValue <= probabilityToReplace)
{
*location = loc;
// TODO: Milind: Not sure whether I should increment samplePostFull of the remainiing slots.
// In Qingsen's experiments, doing this not hurt.
for (int rest = i + 1; rest < wpConfig.maxWP; rest++)
{
tData.watchPointArray[slots[rest]].samplePostFull++;
}
return NON_EMPTY_SLOT;
}
}
// this is an indication not to replace, but if the client chooses to force, they can
*location = slots[0] /*random value*/;
return NONE_AVAILABLE;
}
break;
case NEWEST:
{
// Always replace the newest (largest startTime).
int64_t newestTime = 0;
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (newestTime < tData.watchPointArray[i].startTime)
{
*location = i;
newestTime = tData.watchPointArray[i].startTime;
}
}
return NON_EMPTY_SLOT;
}
break;
case OLDEST:
{
// Always replace the oldest (smallest startTime).
int64_t oldestTime = INT64_MAX;
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (oldestTime > tData.watchPointArray[i].startTime)
{
*location = i;
oldestTime = tData.watchPointArray[i].startTime;
}
}
return NON_EMPTY_SLOT;
}
break;
case EMPTY_SLOT_ONLY:
{
return NONE_AVAILABLE;
}
break;
default:
return NONE_AVAILABLE;
}
// No unarmed WP slot found.
}
// Read memory barrier (x86 lfence + compiler barrier). Orders the read of
// the perf ring buffer's data_head against subsequent payload reads /
// data_tail writes, as required by the perf_event_open(2) mmap protocol.
static inline void
rmb(void)
{
asm volatile("lfence" ::
: "memory");
}
/*
 * Discard all unread data in a perf mmap ring buffer by advancing the
 * consumer position (data_tail) to the producer position (data_head).
 * The read barrier orders our reads against the data_tail store, per the
 * perf_event_open(2) mmap protocol.
 *
 * Cleanup: the previous version computed `tail`, `data` and `avail_sz`
 * locals that were never used outside an #if 0 debug block; they and the
 * dead debug code are removed. Behavior is unchanged.
 */
static void ConsumeAllRingBufferData(void *mbuf)
{
struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
rmb();
// reset tail to head: everything between them is dropped unread
hdr->data_tail = hdr->data_head;
}
// Copy `sz` bytes out of the perf mmap ring buffer into `buf`, handling
// wrap-around, and advance data_tail. Returns 0 on success, -1 if fewer
// than `sz` bytes are available. Follows the perf_event_open(2) mmap
// consumption protocol (read head, barrier, copy, then publish new tail).
static int ReadMampBuffer(void *mbuf, void *buf, size_t sz)
{
struct perf_event_mmap_page *hdr = (struct perf_event_mmap_page *)mbuf;
void *data;
unsigned long tail;
size_t avail_sz, m, c;
size_t pgmsk = wpConfig.pgsz - 1;
/*
* data points to beginning of buffer payload
*/
data = ((void *)hdr) + wpConfig.pgsz;
/*
* position of tail within the buffer payload
*/
tail = hdr->data_tail & pgmsk;
/*
* size of what is available
*
* data_head, data_tail never wrap around
*/
avail_sz = hdr->data_head - hdr->data_tail;
if (sz > avail_sz)
{
printf("\n sz > avail_sz: sz = %lu, avail_sz = %lu\n", sz, avail_sz);
rmb();
return -1;
}
/* From perf_event_open() manpage */
rmb();
/*
* sz <= avail_sz, we can satisfy the request
*/
/*
* c = size till end of buffer
*
* buffer payload size is necessarily
* a power of two, so we can do:
*/
c = pgmsk + 1 - tail;
/*
* min with requested size
*/
m = c < sz ? c : sz;
/* copy beginning */
memcpy(buf, data + tail, m);
/*
* copy wrapped around leftover
*/
if (sz > m)
memcpy(buf + m, data, sz - m);
// Publish the new consumer position so the kernel can reuse the space.
hdr->data_tail += sz;
return 0;
}
// Advance the ring buffer's consumer position by `sz` bytes without copying,
// clamping to the amount of unread data so data_tail never passes data_head.
void SkipBuffer(struct perf_event_mmap_page *hdr, size_t sz)
{
if ((hdr->data_tail + sz) > hdr->data_head)
sz = hdr->data_head - hdr->data_tail;
rmb();
hdr->data_tail += sz;
}
// A candidate PC is "sane" only when it is non-NULL, does not lie past the
// context PC, and precedes it by at most 15 bytes (the maximum x86
// instruction length).
static inline bool IsPCSane(void *contextPC, void *possiblePC)
{
if (possiblePC == 0)
return false;
if (possiblePC > contextPC)
return false;
return (contextPC - possiblePC) <= 15;
}
// Attribution weight for a triggered watchpoint. The context-sharing
// computation is disabled (#if 0); currently every trigger gets full
// weight 1.0 regardless of how many active WPs share its context node.
double ProportionOfWatchpointAmongOthersSharingTheSameContext(WatchPointInfo_t *wpi)
{
#if 0
int share = 0;
for(int i = 0; i < wpConfig.maxWP; i++) {
if(tData.watchPointArray[i].isActive && tData.watchPointArray[i].sample.node == wpi->sample.node) {
share ++;
}
}
assert(share > 0);
return 1.0/share;
#else
return 1.0;
#endif
}
// Disassemble backwards from contextIP to find the previous instruction's
// address, excluding the addresses currently being watched (so the search
// does not trip over our own watchpoints). Used to "patch" an imprecise IP.
static inline void *GetPatchedIP(void *contextIP)
{
void *patchedIP;
void *excludeList[MAX_WP_SLOTS] = {0};
int numExcludes = 0;
for (int idx = 0; idx < wpConfig.maxWP; idx++)
{
if (tData.watchPointArray[idx].isActive)
{
excludeList[numExcludes] = tData.watchPointArray[idx].va;
numExcludes++;
}
}
get_previous_instruction(contextIP, &patchedIP, excludeList, numExcludes);
return patchedIP;
}
// Gather all useful data when a WP triggers
/*
 * Gather all useful data for a triggered watchpoint into *wpt: a reliable
 * PC (precise from perf, patched by disassembly, or faked as contextIP-1
 * when IP fixing is disabled), and - unless disassembly is disabled - the
 * access length/type and effective address. Returns true on success;
 * returns false (after draining the ring buffer) on any non-sample record
 * or when the trigger info cannot be decoded.
 *
 * Fix: the previous version incremented
 * tData.numWatchpointImpreciseAddressArbitraryLength unconditionally right
 * after the if/else that already incremented exactly one of the two
 * imprecise-address counters, double-counting every imprecise-address
 * event. The stray increment is removed.
 */
static bool CollectWatchPointTriggerInfo(WatchPointInfo_t *wpi, WatchPointTrigger_t *wpt, void *context)
{
//struct perf_event_mmap_page * b = wpi->mmapBuffer;
struct perf_event_header hdr;
if (ReadMampBuffer(wpi->mmapBuffer, &hdr, sizeof(struct perf_event_header)) < 0)
{
EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
monitor_real_abort();
}
switch (hdr.type)
{
case PERF_RECORD_SAMPLE:
// NOTE(review): testing hdr.type against the PERF_SAMPLE_IP format flag
// is dubious (record type vs. sample format); it happens to be nonzero
// here. Preserved as-is.
assert(hdr.type & PERF_SAMPLE_IP);
void *contextIP = hpcrun_context_pc(context);
void *preciseIP = (void *)-1;
void *patchedIP = (void *)-1;
void *reliableIP = (void *)-1;
void *addr = (void *)-1;
if (hdr.type & PERF_SAMPLE_IP)
{
if (ReadMampBuffer(wpi->mmapBuffer, &preciseIP, sizeof(uint64_t)) < 0)
{
EMSG("Failed to ReadMampBuffer: %s\n", strerror(errno));
monitor_real_abort();
}
if (!(hdr.misc & PERF_RECORD_MISC_EXACT_IP))
{
// The kernel could not deliver an exact IP; try to repair it.
tData.numWatchpointImpreciseIP++;
if (wpConfig.dontFixIP == false)
{
patchedIP = GetPatchedIP(contextIP);
if (!IsPCSane(contextIP, patchedIP))
{
EMSG("get_previous_instruction failed \n");
tData.numInsaneIP++;
goto ErrExit;
}
reliableIP = patchedIP;
}
else
{
// Fake as requested by Xu for reuse clients
reliableIP = contextIP - 1;
}
}
else
{
#if 0 // Precise PC can be far away in jump/call instructions. \
// Ensure the "precise" PC is within one instruction from context pc
if(!IsPCSane(contextIP, preciseIP)) {
tData.numInsaneIP ++;
//EMSG("get_previous_instruction failed \n");
goto ErrExit;
}
#endif
reliableIP = preciseIP;
}
}
else
{
// Should happen only for wpConfig.isLBREnabled==false
assert(wpConfig.isLBREnabled == false);
// Fall back to old scheme of disassembling and capturing the info
if (wpConfig.dontFixIP == false)
{
patchedIP = GetPatchedIP(contextIP);
if (!IsPCSane(contextIP, patchedIP))
{
tData.numInsaneIP++;
goto ErrExit;
}
reliableIP = patchedIP;
}
else
{
// Fake as requested by Xu for reuse clients
reliableIP = contextIP - 1;
}
}
wpt->pc = reliableIP;
if (wpConfig.dontDisassembleWPAddress == false)
{
// Decode the faulting instruction to learn access length/type/address.
FloatType *floatType = wpConfig.getFloatType ? &wpt->floatType : 0;
if (false == get_mem_access_length_and_type_address(wpt->pc, (uint32_t *)&(wpt->accessLength), &(wpt->accessType), floatType, context, &addr))
{
//EMSG("WP triggered on a non Load/Store add = %p\n", wpt->pc);
goto ErrExit;
}
if (wpt->accessLength == 0)
{
//EMSG("WP triggered 0 access length! at pc=%p\n", wpt->pc);
goto ErrExit;
}
void *patchedAddr = (void *)-1;
// Stack affecting addresses will be off by 8
// Some instructions affect the address computing register: mov (%rax),%eax
// Hence, if the addresses do NOT overlap, merely use the Sample address!
if (false == ADDRESSES_OVERLAP(addr, wpt->accessLength, wpi->va, wpi->sample.wpLength))
{
if ((wpt->accessLength == sizeof(void *)) && (wpt->accessLength == wpi->sample.wpLength) && (((addr - wpi->va) == sizeof(void *)) || ((wpi->va - addr) == sizeof(void *))))
tData.numWatchpointImpreciseAddress8ByteLength++;
else
tData.numWatchpointImpreciseAddressArbitraryLength++;
patchedAddr = wpi->va;
}
else
{
patchedAddr = addr;
}
wpt->va = patchedAddr;
}
else
{
wpt->va = (void *)-1;
}
wpt->ctxt = context;
// We must cleanup the mmap buffer if there is any data left
ConsumeAllRingBufferData(wpi->mmapBuffer);
return true;
case PERF_RECORD_EXIT:
EMSG("PERF_RECORD_EXIT sample type %d sz=%d\n", hdr.type, hdr.size);
goto ErrExit;
case PERF_RECORD_LOST:
EMSG("PERF_RECORD_LOST sample type %d sz=%d\n", hdr.type, hdr.size);
goto ErrExit;
case PERF_RECORD_THROTTLE:
EMSG("PERF_RECORD_THROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
goto ErrExit;
case PERF_RECORD_UNTHROTTLE:
EMSG("PERF_RECORD_UNTHROTTLE sample type %d sz=%d\n", hdr.type, hdr.size);
goto ErrExit;
default:
EMSG("unknown sample type %d sz=%d\n", hdr.type, hdr.size);
goto ErrExit;
}
ErrExit:
// We must cleanup the mmap buffer if there is any data left
ConsumeAllRingBufferData(wpi->mmapBuffer);
return false;
}
// Disable a watchpoint in the cheapest way the kernel supports: a plain
// disable when in-place WP modification is available, otherwise a full
// teardown of the perf event.
void DisableWatchpointWrapper(WatchPointInfo_t *wpi)
{
if (!wpConfig.isWPModifyEnabled)
{
DisArm(wpi);
return;
}
DisableWatchpoint(wpi);
}
// Signal handler invoked when an armed hardware watchpoint fires.
// Matches the signal's fd to an active WP slot, performs the client's
// pre-trigger action, collects trigger info, invokes the client upcall,
// and applies the action it returns (disable one/all, keep, resurrect).
// Returns 0 in all cases; samples that cannot be matched or decoded are
// counted as dropped.
static int OnWatchPoint(int signum, siginfo_t *info, void *context)
{
//volatile int x;
//fprintf(stderr, "OnWatchPoint=%p\n", &x);
// Disable HPCRUN sampling
// if the trap is already in hpcrun, return
// If the interrupt came from inside our code, then drop the sample
// and return and avoid any MSG.
void *pc = hpcrun_context_pc(context);
if (!hpcrun_safe_enter_async(pc))
return 0;
linux_perf_events_pause();
tData.numWatchpointTriggers++;
//fprintf(stderr, " numWatchpointTriggers = %lu, \n", tData.numWatchpointTriggers);
//find which watchpoint fired
int location = -1;
for (int i = 0; i < wpConfig.maxWP; i++)
{
if ((tData.watchPointArray[i].isActive) && (info->si_fd == tData.watchPointArray[i].fileHandle))
{
location = i;
break;
}
}
// Ensure it is an active WP
if (location == -1)
{
EMSG("\n WP trigger did not match any known active WP\n");
//monitor_real_abort();
hpcrun_safe_exit();
linux_perf_events_resume();
//fprintf("\n WP trigger did not match any known active WP\n");
return 0;
}
WatchPointTrigger_t wpt;
WPTriggerActionType retVal;
WatchPointInfo_t *wpi = &tData.watchPointArray[location];
// Perform Pre watchpoint action
bool oriVal = false;
switch (wpi->sample.preWPAction)
{
case DISABLE_WP:
DisableWatchpointWrapper(wpi);
break;
case DISABLE_ALL_WP:
// NOTE(review): wpi is disabled directly here and then again via the
// wrapper in the loop below (it is active) — looks redundant; confirm.
DisableWatchpoint(wpi);
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (tData.watchPointArray[i].isActive)
{
//fprintf(stderr, "tid=%d wpi=%p wpi->mmapBuffer=%p DISABLE_ALL_WP\n", syscall(SYS_gettid), &tData.watchPointArray[i], tData.watchPointArray[i].mmapBuffer);
DisableWatchpointWrapper(&tData.watchPointArray[i]);
}
}
break;
default:
assert(0 && "NYI");
monitor_real_abort();
break;
}
if (false == CollectWatchPointTriggerInfo(wpi, &wpt, context))
{
tData.numWatchpointDropped++;
retVal = DISABLE_WP; // disable if unable to collect any info.
DisArm(wpi);
}
else
{
retVal = tData.fptr(wpi, 0, wpt.accessLength /* invalid*/, &wpt);
DisArm(wpi);
}
// Let the client take action.
switch (retVal)
{
case DISABLE_WP:
{
if (wpi->isActive)
{
DisableWatchpointWrapper(wpi);
}
// Reset per WP probability
wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
}
break;
case DISABLE_ALL_WP:
{
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (tData.watchPointArray[i].isActive)
{
DisableWatchpointWrapper(&tData.watchPointArray[i]);
}
// Reset per WP probability
tData.watchPointArray[i].samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
}
}
break;
case ALREADY_DISABLED:
{ // Already disabled, perhaps in pre-WP action
assert(wpi->isActive == false);
// Reset per WP probability
wpi->samplePostFull = SAMPLES_POST_FULL_RESET_VAL;
}
break;
case RETAIN_WP:
{ // resurrect this wp
if (!wpi->isActive)
{
EnableWatchpoint(wpi->fileHandle);
wpi->isActive = true;
//fprintf(stderr, "tid=%d wpi->isActive=true wpi=%p wpi->mmapBuffer=%p (OnWatchPoint)\n", syscall(SYS_gettid), wpi, wpi->mmapBuffer);
}
}
break;
default: // Retain the state
break;
}
// hpcrun_all_sources_start();
linux_perf_events_resume();
hpcrun_safe_exit();
return 0;
}
// Validate a sample before arming: x86 hardware breakpoints require the
// address to be aligned to the watch length, and the length must be one of
// 1/2/4/8 bytes. Returns true if armable; false if misaligned; aborts the
// process on length 0 or any other unsupported length.
static bool ValidateWPData(SampleData_t *sampleData)
{
// Check alignment
#if defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(__amd64)
switch (sampleData->wpLength)
{
case 0:
EMSG("\nValidateWPData: 0 length WP never allowed");
monitor_real_abort();
case 1:
case 2:
case 4:
case 8:
if (IS_ALIGNED(sampleData->va, sampleData->wpLength))
return true; // properly aligned for the requested length
else
return false; // unaligned: HW breakpoint cannot watch it
break;
default:
EMSG("Unsuppported WP length %d", sampleData->wpLength);
monitor_real_abort();
return false; // unreachable: monitor_real_abort() does not return
}
#else
#error "unknown architecture"
#endif
}
// Return true iff the sample's [va, va+wpLength) range overlaps the range
// of any currently active watchpoint. (Name typo "Overalpped" kept: it is
// this function's established identifier.)
static bool IsOveralpped(SampleData_t *sampleData)
{
// Is a WP with the same/overlapping address active?
for (int i = 0; i < wpConfig.maxWP; i++)
{
if (tData.watchPointArray[i].isActive)
{
if (ADDRESSES_OVERLAP(tData.watchPointArray[i].sample.va, tData.watchPointArray[i].sample.wpLength, sampleData->va, sampleData->wpLength))
{
return true;
}
}
}
return false;
}
// Snapshot the current contents of the watched address into the WP slot's
// value buffer so a later trigger can compare old vs. new values. Lengths
// other than 2/4/8 (including unexpected ones) are captured as one byte.
void CaptureValue(SampleData_t *sampleData, WatchPointInfo_t *wpi)
{
void *dst = &(wpi->value[0]);
const void *src = sampleData->va;
switch (sampleData->wpLength)
{
case 8:
*((uint64_t *)dst) = *(const uint64_t *)src;
break;
case 4:
*((uint32_t *)dst) = *(const uint32_t *)src;
break;
case 2:
*((uint16_t *)dst) = *(const uint16_t *)src;
break;
default: // any unrecognized length degrades to a 1-byte capture
case 1:
*((uint8_t *)dst) = *(const uint8_t *)src;
break;
}
}
// Client entry point: try to arm a watchpoint for this sample. Drops the
// sample (returns false) if the data is invalid, if it overlaps an active
// WP, or if the replacement policy yields no victim. When `captureValue`
// is set, the watched location's current value is snapshotted BEFORE
// arming. Returns true once the WP is armed.
// NOTE(review): `overwritePolicy` is accepted but not consulted here —
// victim choice uses wpConfig.replacementPolicy; confirm intent.
bool SubscribeWatchpoint(SampleData_t *sampleData, OverwritePolicy overwritePolicy, bool captureValue)
{
if (ValidateWPData(sampleData) == false)
{
return false;
}
if (IsOveralpped(sampleData))
{
return false; // drop the sample if it overlaps an existing address
}
// No overlap, look for a victim slot
int victimLocation = -1;
// Find a slot to install WP
VictimType r = GetVictim(&victimLocation, wpConfig.replacementPolicy);
if (r != NONE_AVAILABLE)
{
// VV IMP: Capture value before arming the WP.
if (captureValue)
CaptureValue(sampleData, &tData.watchPointArray[victimLocation]);
// I know the error case that we have captured the value but ArmWatchPoint fails.
// I am not handling that corner case because ArmWatchPoint() will fail with a monitor_real_abort().
if (ArmWatchPoint(&tData.watchPointArray[victimLocation], sampleData) == false)
{
//LOG to hpcrun log
EMSG("ArmWatchPoint failed for address %p", sampleData->va);
return false;
}
return true;
}
return false;
}
#ifdef TEST
#include <omp.h>
__thread volatile int cnt;
// Test-only upcall: counts triggers and asks for the WP to be disabled.
// NOTE(review): OnWatchPoint invokes tData.fptr with FOUR arguments
// (wpi, 0, accessLength, &wpt) but this callback takes two, and it returns
// DISABLE rather than DISABLE_WP — this #ifdef TEST code appears stale
// relative to the current WatchPointUpCall_t contract; verify before use.
WPUpCallTRetType Test1UpCall(WatchPointInfo_t *wp, WatchPointTrigger_t *wt)
{
printf("\n Test1UpCall %p\n", wt->va);
if (wpConfig.isLBREnabled)
assert(wp->sample.va == wt->va);
cnt++;
return DISABLE;
}
void TestBasic()
{
tData.fptr = Test1UpCall;
sigset_t block_mask;
sigemptyset(&block_mask);
// Set a signal handler for SIGUSR1
struct sigaction sa1 = {
.sa_sigaction = OnWatchPoint,
// .sa_mask = block_mask,
.sa_flags = SA_SIGINFO | SA_RESTART | SA_NODEFER};
if (sigaction(wpConfig.signalDelivered, &sa1, NULL) == -1)
{
fprintf(stderr, "Failed to set WHICH_SIG handler: %s\n", strerror(errno));
monitor_real_abort();
}
WatchpointThreadInit();
int N = 10000;
volatile int dummyWPLocation[10000];
cnt = 0;
for (int i = 0; i < N; i++)
{
SampleData_t s = {.va = &dummyWPLocation[i], .wpLength = sizeof(int), .type = WP_WRITE};
SubscribeWatchpoint(&s, AUTO);
}
for (int i = 0; i < N; i++)
{
dummyWPLocation[i]++;
}
printf("\n cnt = %d\n", cnt);
assert(cnt == wpConfig.maxWP);
WatchpointThreadTerminate();
}
// Test driver: stress TestBasic forever from every OpenMP thread.
// (Intentionally never terminates; used for soak-testing the WP machinery.)
int main()
{
printf("\n Test 1: single threaded");
while (1)
{
#pragma omp parallel
{
TestBasic();
}
}
return 0;
}
#endif
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/memory_.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/resample.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImageChannel() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImageChannel method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const CompositeOperator compose,Image *composite_image,
% const ssize_t x_offset,const ssize_t y_offset)
% MagickBooleanType CompositeImageChannel(Image *image,
% const ChannelType channel,const CompositeOperator compose,
% Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
%
% A description of each parameter follows:
%
% o image: the destination image, modified by he composition
%
% o channel: the channel.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o composite_image: the composite (source) image.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'composite_image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o "compose:outside-overlay"
% Modify how the composition is to effect areas not directly covered
% by the 'composite_image' at the offset given. Normally this is
% dependant on the 'compose' method, especially Duff-Porter methods.
%
% If set to "false" then disable all normal handling of pixels not
% covered by the composite_image. Typically used for repeated tiling
% of the composite_image by the calling API.
%
% Previous to IM v6.5.3-3 this was called "modify-outside-overlay"
%
*/
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
/*
** Programmers notes on SVG specification.
**
** A Composition is defined by...
** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors
** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc)
** Y = 1 for source preserved
** Z = 1 for destination preserved
**
** Conversion to transparency (then optimized)
** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
**
** Where...
** Sca = Sc*Sa normalized Source color divided by Source alpha
** Dca = Dc*Da normalized Dest color divided by Dest alpha
** Dc' = Dca'/Da' the desired color value for this channel.
**
** Da' in in the follow formula as 'gamma' The resulting alpla value.
**
**
** Most functions use a blending mode of over (X=1,Y=1,Z=1)
** this results in the following optimizations...
** gamma = Sa+Da-Sa*Da;
** gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta;
** opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma
**
** The above SVG definitions also definate that Mathematical Composition
** methods should use a 'Over' blending mode for Alpha Channel.
** It however was not applied for composition modes of 'Plus', 'Minus',
** the modulus versions of 'Add' and 'Subtract'.
**
**
** Mathematical operator changes to be applied from IM v6.7...
**
** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
** 'ModulusAdd' and 'ModulusSubtract' for clarity.
**
** 2/ All mathematical compositions work as per the SVG specification
** with regard to blending. This now includes 'ModulusAdd' and
** 'ModulusSubtract'.
**
** 3/ When the special channel flag 'sync' (syncronize channel updates)
** is turned off (enabled by default) then mathematical compositions are
** only performed on the channels specified, and are applied
** independantally of each other. In other words the mathematics is
** performed as 'pure' mathematical operations, rather than as image
** operations.
*/
/* Atop channel blend: source atop destination, Sca*Da + Dca*(1-Sa), with
   Da/gamma algebraically reduced to 1.0 (hence the unused Da parameter). */
static inline MagickRealType Atop(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,
const MagickRealType magick_unused(Da))
{
return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */
}
/* Compose p atop q: the result keeps q's shape (alpha) and blends colors
   by p's alpha. The index channel is blended only for CMYK pixels. */
static inline void CompositeAtop(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */
composite->red=Atop(p->red,Sa,q->red,1.0);
composite->green=Atop(p->green,Sa,q->green,1.0);
composite->blue=Atop(p->blue,Sa,q->blue,1.0);
if (q->colorspace == CMYKColorspace)
composite->index=Atop(p->index,Sa,q->index,1.0);
}
/*
What is this Composition method for? Can't find any specification!
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen).
*/
/* Bumpmap composition: scale every channel of q (including opacity, taken
   from p) by the normalized intensity of p. See the preceding file comment:
   this does NOT perform correct 'over' alpha blending. */
static inline void CompositeBumpmap(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
intensity;
intensity=MagickPixelIntensity(p);
composite->red=QuantumScale*intensity*q->red;
composite->green=QuantumScale*intensity*q->green;
composite->blue=QuantumScale*intensity*q->blue;
composite->opacity=(MagickRealType) QuantumScale*intensity*
p->opacity;
if (q->colorspace == CMYKColorspace)
composite->index=QuantumScale*intensity*q->index;
}
/* Clear composition: the output pixel is fully transparent black; the
   index channel is zeroed too when the destination is CMYK. */
static inline void CompositeClear(const MagickPixelPacket *q,
  MagickPixelPacket *composite)
{
  composite->red=0.0;
  composite->green=0.0;
  composite->blue=0.0;
  composite->opacity=(MagickRealType) TransparentOpacity;
  if (q->colorspace == CMYKColorspace)
    composite->index=0.0;
}
/* ColorBurn channel function on premultiplied values (Sca=Sc*Sa, Dca=Dc*Da),
   per the March 2009 SVG compositing specification. The Oct 2004 variant is
   retained under #if 0 for reference. */
static MagickRealType ColorBurn(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification.
*/
if (Sca*Da + Dca*Sa <= Sa*Da)
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa));
#else
/*
March 2009 SVG specification.
*/
/* Special cases guard the division by Sca below. */
if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon))
return(Sa*Da+Dca*(1.0-Sa));
if (Sca < MagickEpsilon)
return(Dca*(1.0-Sa));
return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}
/* Apply ColorBurn per channel with 'over' alpha blending (gamma=Sa+Da-Sa*Da
   per the SVG spec); un-premultiplies the result by dividing by gamma,
   guarding against a zero denominator. */
static inline void CompositeColorBurn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
/*
  'ColorDodge' blend term for alpha-premultiplied channel values; returns
  the premultiplied result.  Two earlier (disabled) SVG formulations are
  kept for reference; the active code derives the result from the original
  f(Sc,Dc) = Dc/(1-Sc) formula, with epsilon guards for the Sca == Sa
  (division-by-zero) cases.
*/
static MagickRealType ColorDodge(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification.
*/
if ((Sca*Da+Dca*Sa) >= Sa*Da)
return( Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) );
return( Dca*Sa*Sa/(Sa-Sca) + Sca*(1.0-Da) + Dca*(1.0-Sa) );
#endif
#if 0
/*
New specification, March 2009 SVG specification. This specification was
also wrong for non-overlap cases.
*/
if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da));
if (fabs(Sca-Sa) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca)));
#endif
/*
Working from first principles using the original formula:
f(Sc,Dc) = Dc/(1-Sc)
This works correctly! Looks like the 2004 model was right but just
required an extra condition for correct handling.
*/
if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
if (fabs(Sca-Sa) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  Apply ColorDodge() to each channel with SVG 'over' alpha blending; gamma
  (the blended alpha) becomes the result opacity, then un-premultiplies the
  channel values, guarded against division by zero.
*/
static inline void CompositeColorDodge(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType Darken(const MagickRealType p,
const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  /*
    Darken: alpha-blend with the smaller channel value on top — src-over
    when p is the darker value, dst-over otherwise.
  */
  return(p < q ? MagickOver_(p,alpha,q,beta) : MagickOver_(q,beta,p,alpha));
}
/*
  'Darken' composite.  With 'Sync' the whole pixel is blended using the
  alpha-weighted Darken() helper ('over' style); otherwise each selected
  channel independently takes the channel minimum, and the alpha channel
  the maximum opacity (i.e. the minimum alpha).
*/
static inline void CompositeDarken(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
MagickRealType
gamma;
if ( (channel & SyncChannels) != 0 ) {
composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
gamma=1.0-QuantumScale*composite->opacity;
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity);
composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity);
composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=MagickMax(p->opacity,q->opacity);
if ( (channel & RedChannel) != 0 )
composite->red=MagickMin(p->red,q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=MagickMin(p->green,q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=MagickMin(p->blue,q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=MagickMin(p->index,q->index);
}
}
static inline void CompositeDarkenIntensity(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
  /*
    Copy whichever pixel has the lower intensity.  With the 'Sync' flag the
    whole pixel is chosen by alpha-weighted intensity; otherwise the raw
    intensities select the winner and only the requested channels are
    copied from it.
  */
  if ((channel & SyncChannels) != 0)
    {
      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;
      Da=1.0-QuantumScale*q->opacity;
      *composite=(Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ?
        *p : *q;
    }
  else
    {
      const MagickPixelPacket
        *winner;

      winner=(MagickPixelIntensity(p) < MagickPixelIntensity(q)) ? p : q;
      if ((channel & AlphaChannel) != 0)
        composite->opacity=winner->opacity;
      if ((channel & RedChannel) != 0)
        composite->red=winner->red;
      if ((channel & GreenChannel) != 0)
        composite->green=winner->green;
      if ((channel & BlueChannel) != 0)
        composite->blue=winner->blue;
      if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
        composite->index=winner->index;
    }
}
/*
  'Difference' blend term: Sa*p + Da*q - 2*Sa*Da*min(p,q), which reduces to
  |p - q| when both pixels are opaque.  Inputs are raw channel values; the
  caller folds the usual QuantumRange factor into its gamma.
*/
static inline MagickRealType Difference(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
/* Optimized by multiplying by QuantumRange (taken from gamma). */
return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q));
}
/*
  'Difference' composite.  The 'Sync' path uses the alpha-aware
  Difference() helper with SVG 'over' alpha blending; the per-channel path
  is a plain absolute difference (and, for alpha, the negated absolute
  difference of the opacities).
*/
static inline void CompositeDifference(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
/* Values are not normalized as an optimization. */
composite->red=gamma*Difference(p->red,Sa,q->red,Da);
composite->green=gamma*Difference(p->green,Sa,q->green,Da);
composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Difference(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-fabs(p->opacity - q->opacity);
if ( (channel & RedChannel) != 0 )
composite->red=fabs(p->red - q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=fabs(p->green - q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=fabs(p->blue - q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=fabs(p->index - q->index);
}
}
/*
  Sca,Dca are alpha-premultiplied source/destination channel values and
  Sa,Da the corresponding alphas; returns the premultiplied result.
*/
static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa,
const MagickRealType Dca,const MagickRealType Da)
{
/*
Divide Source by Destination
f(Sc,Dc) = Sc / Dc
But with appropriate handling for special case of Dc == 0 specifically
so that f(Black,Black)=Black and f(non-Black,Black)=White.
It is however also important to correctly do 'over' alpha blending which
is why the formula becomes so complex.
*/
if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
if (fabs(Dca) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  'Divide' composite.  'Sync' path: Divide() per channel in premultiplied
  space with SVG 'over' alpha blending; non-sync path: each selected
  channel is divided independently with alphas fixed at 1.0 (alpha itself
  gets the negated Divide of the two alphas).
*/
static inline void CompositeDivide(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*
Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*
Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*
Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*
Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
}
}
/*
  'Exclusion' blend term (SVG): Sca*Da + Dca*Sa - 2*Sca*Dca plus the usual
  non-overlap contributions; reduces to Sc + Dc - 2*Sc*Dc for opaque
  pixels.  Inputs/outputs are alpha-premultiplied.
*/
static MagickRealType Exclusion(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  'Exclusion' composite.  'Sync' path: Exclusion() per channel in
  premultiplied space with SVG 'over' alpha blending; non-sync path: each
  selected channel independently with alphas fixed at 1.0.
*/
static inline void CompositeExclusion(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
gamma,
Sa,
Da;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*
Exclusion(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*
Exclusion(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*
Exclusion(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*
Exclusion(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
}
}
/*
  'HardLight' blend term (SVG): multiply when the source channel is dark
  (2*Sca < Sa), screen otherwise.  Inputs/outputs are alpha-premultiplied.
*/
static MagickRealType HardLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
if ((2.0*Sca) < Sa)
return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  Apply HardLight() to each channel in premultiplied space with SVG 'over'
  alpha blending; gamma (the blended alpha) sets the result opacity and
  un-premultiplies the channel values.
*/
static inline void CompositeHardLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static void CompositeHSB(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,double *hue,double *saturation,double *brightness)
{
  MagickRealType
    delta,
    max,
    min;

  /*
    Convert an RGB triplet to HSB.  Hue and saturation stay 0.0 for black
    (max == 0) and for greys (delta == 0); hue is normalized to [0,1).
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(brightness != (double *) NULL);
  max=red;
  if (green > max)
    max=green;
  if (blue > max)
    max=blue;
  min=red;
  if (green < min)
    min=green;
  if (blue < min)
    min=blue;
  *hue=0.0;
  *saturation=0.0;
  *brightness=(double) (QuantumScale*max);
  if (max == 0.0)
    return;
  *saturation=(double) (1.0-min/max);
  delta=max-min;
  if (delta == 0.0)
    return;
  /* max is necessarily one of red, green or blue. */
  if (red == max)
    *hue=(double) ((green-blue)/delta);
  else
    if (green == max)
      *hue=(double) (2.0+(blue-red)/delta);
    else
      *hue=(double) (4.0+(red-green)/delta);
  *hue/=6.0;
  if (*hue < 0.0)
    *hue+=1.0;
}
/*
  'In' composite term: the source channel weighted by both alphas; the
  destination channel value q is unused (only its alpha shapes the result).
*/
static inline MagickRealType In(const MagickRealType p,
const MagickRealType Sa,const MagickRealType magick_unused(q),
const MagickRealType Da)
{
return(Sa*p*Da);
}
/*
  'In' composite: the result alpha is Sa*Da; each channel is the In() term
  un-premultiplied by that alpha (guarded against division by zero).
*/
static inline void CompositeIn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
gamma,
Sa,
Da;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=Sa*Da;
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*In(p->red,Sa,q->red,Da);
composite->green=gamma*In(p->green,Sa,q->green,Da);
composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*In(p->index,Sa,q->index,Da);
}
static inline MagickRealType Lighten(const MagickRealType p,
const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  /*
    Lighten: alpha-blend with the larger channel value on top — src-over
    when p is the lighter value, dst-over otherwise.
  */
  return(p > q ? MagickOver_(p,alpha,q,beta) : MagickOver_(q,beta,p,alpha));
}
/*
  'Lighten' composite.  With 'Sync' the whole pixel is blended using the
  alpha-weighted Lighten() helper ('over' style); otherwise each selected
  channel independently takes the channel maximum, and the alpha channel
  the minimum opacity (i.e. the maximum alpha).
*/
static inline void CompositeLighten(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Lighten is also equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
MagickRealType
gamma;
if ( (channel & SyncChannels) != 0 ) {
composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
gamma=1.0-QuantumScale*composite->opacity;
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=MagickMin(p->opacity,q->opacity);
if ( (channel & RedChannel) != 0 )
composite->red=MagickMax(p->red,q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=MagickMax(p->green,q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=MagickMax(p->blue,q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=MagickMax(p->index,q->index);
}
}
static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
  /*
    Copy whichever pixel has the higher intensity.  With the 'Sync' flag
    the whole pixel is chosen by alpha-weighted intensity; otherwise the
    raw intensities select the winner and only the requested channels are
    copied from it.
  */
  if ((channel & SyncChannels) != 0)
    {
      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;
      Da=1.0-QuantumScale*q->opacity;
      *composite=(Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ?
        *p : *q;
    }
  else
    {
      const MagickPixelPacket
        *winner;

      winner=(MagickPixelIntensity(p) > MagickPixelIntensity(q)) ? p : q;
      if ((channel & AlphaChannel) != 0)
        composite->opacity=winner->opacity;
      if ((channel & RedChannel) != 0)
        composite->red=winner->red;
      if ((channel & GreenChannel) != 0)
        composite->green=winner->green;
      if ((channel & BlueChannel) != 0)
        composite->blue=winner->blue;
      if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
        composite->index=winner->index;
    }
}
/*
  NOTE(review): this helper is compiled out (#if 0); CompositeLinearDodge()
  below inlines the Sca+Dca sum directly.  Kept for documentation only.
*/
#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
LinearDodge: simplifies to a trivial formula
f(Sc,Dc) = Sc + Dc
Dca' = Sca + Dca
*/
return(Sca+Dca);
}
#endif
/*
  'LinearDodge' composite: f(Sc,Dc) = Sc + Dc with SVG 'over' alpha
  blending.  The premultiplied sum p->chan*Sa + q->chan*Da is
  un-premultiplied by gamma, the blended alpha.
*/
static inline void CompositeLinearDodge(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*(p->red*Sa+q->red*Da);
composite->green=gamma*(p->green*Sa+q->green*Da);
composite->blue=gamma*(p->blue*Sa+q->blue*Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*(p->index*Sa+q->index*Da);
}
/*
  'LinearBurn' blend term; Sca+Dca-Sa*Da is the alpha-premultiplied form of
  f(Sc,Dc) = Sc + Dc - 1.
*/
static inline MagickRealType LinearBurn(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
return(Sca+Dca-Sa*Da);
}
/*
  Apply LinearBurn() to each channel in premultiplied space with SVG
  'over' alpha blending; gamma sets the result opacity and un-premultiplies
  the channel values.
*/
static inline void CompositeLinearBurn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
/*
  'LinearLight' blend term; (Sca-Sa)*Da+Sca+Dca is the alpha-aware form of
  f(Sc,Dc) = Dc + 2*Sc - 1.  The #if 0 branch is the old opaque-only
  version, kept for reference.
*/
static inline MagickRealType LinearLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
/*
Previous formula, was only valid for fully-opaque images.
*/
return(Dca+2*Sca-1.0);
#else
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
return((Sca-Sa)*Da+Sca+Dca);
#endif
}
/*
  Apply LinearLight() to each channel in premultiplied space with SVG
  'over' alpha blending; gamma sets the result opacity and un-premultiplies
  the channel values.
*/
static inline void CompositeLinearLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType Mathematics(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
const GeometryInfo *geometry_info)
{
/*
'Mathematics' a free form user control mathematical composition is defined
as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite as
a comma-separated 'geometry' string in "compose:args" image artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
Dca*(1.0-Sa));
}
/*
  'Mathematics' composite: the user-controlled A*Sc*Dc+B*Sc+C*Dc+D blend
  with coefficients taken from *args.  'Sync' path uses SVG 'over' alpha
  blending in premultiplied space; otherwise each selected channel is
  blended independently with alphas fixed at 1.0.
*/
static inline void CompositeMathematics(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo
*args, MagickPixelPacket *composite)
{
MagickRealType
Sa,
Da,
gamma;
Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da,args);
composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da,args);
composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da,args);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da,args);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*
Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*
Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*
Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*
Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
}
}
static inline void CompositePlus(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
/*
NOTE: "Plus" does not use 'over' alpha-blending but uses a
special 'plus' form of alpha-blending. It is the ONLY mathematical
operator to do this. This is what makes it different to the
otherwise equivalent "LinearDodge" composition method.
Note however that color channels are still affected by the alpha channel
as a result of the blending, making it just as useless for independent
channel maths, just like all other mathematical composition methods.
As such the removal of the 'sync' flag, is still a useful convention.
The MagickPixelCompositePlus() function is defined in
"composite-private.h" so it can also be used for Image Blending.
*/
MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=p->opacity+q->opacity-QuantumRange;
if ( (channel & RedChannel) != 0 )
composite->red=p->red+q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=p->green+q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=p->blue+q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=p->index+q->index;
}
}
/*
  'Minus' blend term in premultiplied space; when Sa == Da == 1 this
  reduces to Sca - Dca.  Da is unused by the formula.
*/
static inline MagickRealType Minus(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,
const MagickRealType magick_unused(Da))
{
/*
Minus Source from Destination
f(Sc,Dc) = Sc - Dc
*/
return(Sca + Dca - 2*Dca*Sa);
}
/*
  'Minus' composite.  'Sync' path: Minus() per channel in premultiplied
  space with SVG 'over' alpha blending; non-sync path: a plain per-channel
  subtraction (alpha gets QuantumRange*(1-(Sa-Da))).
*/
static inline void CompositeMinus(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Sa,
Da,
gamma;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-(Sa-Da));
if ( (channel & RedChannel) != 0 )
composite->red=p->red-q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=p->green-q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=p->blue-q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=p->index-q->index;
}
}
/*
  'ModulusAdd' blend term: p+q with wrap-around modulo QuantumRange+1,
  then alpha-blended with the non-overlap contributions of p and q.
*/
static inline MagickRealType ModulusAdd(const MagickRealType p,
const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
MagickRealType
pixel;
pixel=p+q;
if (pixel > QuantumRange)
pixel-=(QuantumRange+1.0); /* wrap around rather than clamp */
return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}
/*
  'ModulusAdd' composite.  'Sync' path: ModulusAdd() per channel with the
  SVG 'over' blended alpha stored as opacity (note gamma is computed but
  not applied to the channel values).  Non-sync path: per-channel wrap
  addition with alphas fixed at 1.0; the alpha channel is handled in the
  inverted (transparency) domain.
*/
static inline void CompositeModulusAdd(const MagickPixelPacket *p,
const MagickPixelPacket *q, const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
MagickRealType
Sa,
Da,
gamma;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=ModulusAdd(p->red,Sa,q->red,Da);
composite->green=ModulusAdd(p->green,Sa,q->green,Da);
composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=ModulusAdd(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
1.0,QuantumRange-q->opacity,1.0);
if ( (channel & RedChannel) != 0 )
composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
}
}
/*
  'ModulusSubtract' blend term: p-q with wrap-around modulo QuantumRange+1,
  then alpha-blended with the non-overlap contributions of p and q.
*/
static inline MagickRealType ModulusSubtract(const MagickRealType p,
const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
MagickRealType
pixel;
pixel=p-q;
if (pixel < 0.0)
pixel+=(QuantumRange+1.0); /* wrap around rather than clamp */
return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}
/*
  'ModulusSubtract' composite.  Mirrors CompositeModulusAdd(): 'Sync' path
  stores the SVG 'over' blended alpha as opacity (gamma computed but not
  applied to the channel values); non-sync path does per-channel wrap
  subtraction with alphas fixed at 1.0, alpha handled in the inverted
  (transparency) domain.
*/
static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
const MagickPixelPacket *q, const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
MagickRealType
Sa,
Da,
gamma;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma = RoundToUnity(Sa+Da-Sa*Da);
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
1.0,QuantumRange-q->opacity,1.0);
if ( (channel & RedChannel) != 0 )
composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
}
}
/*
  'Multiply' blend term (SVG): Sca*Dca plus the non-overlap contributions;
  reduces to Sc*Dc for opaque pixels.  Inputs/outputs are
  alpha-premultiplied.
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  'Multiply' composite.  'Sync' path: Multiply() per channel in
  premultiplied space with SVG 'over' alpha blending; non-sync path: a
  plain per-channel product scaled by QuantumScale (alpha gets
  QuantumRange*(1-Sa*Da)).
*/
static inline void CompositeMultiply(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Sa*Da);
if ( (channel & RedChannel) != 0 )
composite->red=QuantumScale*p->red*q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumScale*p->green*q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumScale*p->blue*q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumScale*p->index*q->index;
}
}
/*
  'Out' composite term: the source channel weighted by its own alpha and
  the destination's transparency; the destination channel value q is
  unused.
*/
static inline MagickRealType Out(const MagickRealType p,
const MagickRealType Sa,const MagickRealType magick_unused(q),
const MagickRealType Da)
{
return(Sa*p*(1.0-Da));
}
/*
  'Out' composite: the result alpha is Sa*(1-Da); each channel is the
  Out() term un-premultiplied by that alpha (guarded against division by
  zero).
*/
static inline void CompositeOut(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Sa,
Da,
gamma;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=Sa*(1.0-Da);
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*Out(p->red,Sa,q->red,Da);
composite->green=gamma*Out(p->green,Sa,q->green,Da);
composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Out(p->index,Sa,q->index,Da);
}
/*
  'PegtopLight' blend term in premultiplied space.  The Da epsilon guard
  returns Sca directly, avoiding the Dca*Dca/Da division when the
  destination is fully transparent.
*/
static MagickRealType PegtopLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
PegTop: A Soft-Light alternative: A continuous version of the Softlight
function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs(Da) < MagickEpsilon)
return(Sca);
return(Dca*Dca*(Sa-2*Sca)/Da+Sca*(2*Dca+1-Da)+Dca*(1-Sa));
}
/*
  Apply PegtopLight() to each channel in premultiplied space with SVG
  'over' alpha blending; gamma sets the result opacity and un-premultiplies
  the channel values.
*/
static inline void CompositePegtopLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc

    Arguments are alpha-premultiplied channel values (Sca, Dca) and their
    alphas (Sa, Da); the three returns correspond to the three cases of
    f() above in premultiplied form.
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}
static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dst_alpha,
    scale,
    src_alpha;

  /*
    Blend source over destination using the PinLight method.
  */
  src_alpha=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  dst_alpha=1.0-QuantumScale*q->opacity;
  /* over blend, as per SVG doc */
  scale=RoundToUnity(src_alpha+dst_alpha-src_alpha*dst_alpha);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  scale=QuantumRange/(fabs(scale) <= MagickEpsilon ? 1.0 : scale);
  composite->red=scale*PinLight(QuantumScale*p->red*src_alpha,src_alpha,
    QuantumScale*q->red*dst_alpha,dst_alpha);
  composite->green=scale*PinLight(QuantumScale*p->green*src_alpha,src_alpha,
    QuantumScale*q->green*dst_alpha,dst_alpha);
  composite->blue=scale*PinLight(QuantumScale*p->blue*src_alpha,src_alpha,
    QuantumScale*q->blue*dst_alpha,dst_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*PinLight(QuantumScale*p->index*src_alpha,src_alpha,
      QuantumScale*q->index*dst_alpha,dst_alpha);
}
static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  MagickRealType
    sum;

  /*
    Screen: a negated multiply,

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)

    written in its expanded premultiplied form.
  */
  sum=Sca+Dca;
  return(sum-Sca*Dca);
}
static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    dst_alpha,
    scale,
    src_alpha;

  /*
    Apply the Screen blend either with all channels synchronized (full
    alpha blending, as per the SVG compositing rules) or, when channels
    are not synced, treating each requested channel as an independent
    grayscale channel.
  */
  src_alpha=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  dst_alpha=1.0-QuantumScale*q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      scale=RoundToUnity(src_alpha+dst_alpha-src_alpha*dst_alpha);
      composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
      src_alpha*=QuantumScale;
      dst_alpha*=QuantumScale;  /* optimization */
      scale=QuantumRange/(fabs(scale) <= MagickEpsilon ? 1.0 : scale);
      composite->red=scale*Screen(p->red*src_alpha,q->red*dst_alpha);
      composite->green=scale*Screen(p->green*src_alpha,q->green*dst_alpha);
      composite->blue=scale*Screen(p->blue*src_alpha,q->blue*dst_alpha);
      if (q->colorspace == CMYKColorspace)
        composite->index=scale*Screen(p->index*src_alpha,q->index*dst_alpha);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity=QuantumRange*(1.0-Screen(src_alpha,dst_alpha));
      if ((channel & RedChannel) != 0)
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ((channel & GreenChannel) != 0)
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ((channel & BlueChannel) != 0)
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}
static MagickRealType SoftLight(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification -- was found to be incorrect
See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
*/
if (2.0*Sca < Sa)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (8.0*Dca <= Da)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
Dca*(1.0-Sa));
#else
MagickRealType
alpha,
beta;
/*
New specification: March 2009 SVG specification.
*/
alpha=Dca/Da;
if ((2.0*Sca) < Sa)
return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
}
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
#endif
}
static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dst_alpha,
    scale,
    src_alpha;

  /*
    Blend source over destination using the SVG SoftLight method.
  */
  src_alpha=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  dst_alpha=1.0-QuantumScale*q->opacity;
  /* over blend, as per SVG doc */
  scale=RoundToUnity(src_alpha+dst_alpha-src_alpha*dst_alpha);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  scale=QuantumRange/(fabs(scale) <= MagickEpsilon ? 1.0 : scale);
  composite->red=scale*SoftLight(QuantumScale*p->red*src_alpha,src_alpha,
    QuantumScale*q->red*dst_alpha,dst_alpha);
  composite->green=scale*SoftLight(QuantumScale*p->green*src_alpha,src_alpha,
    QuantumScale*q->green*dst_alpha,dst_alpha);
  composite->blue=scale*SoftLight(QuantumScale*p->blue*src_alpha,src_alpha,
    QuantumScale*q->blue*dst_alpha,dst_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*SoftLight(QuantumScale*p->index*src_alpha,src_alpha,
      QuantumScale*q->index*dst_alpha,dst_alpha);
}
/*
  Deprecated.
  Multiply the difference by "amount" if the difference is larger than
  "threshold".  What purpose this serves is unknown.
  The opacity calculation appears to be inverted.  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    difference;

  /*
    Return q unchanged when twice the magnitude of p-q stays below
    threshold; otherwise push q toward p by amount*(p-q).
  */
  difference=p-q;
  if ((MagickRealType) fabs((double) (2.0*difference)) < threshold)
    return(q);
  return(q+difference*amount);
}
static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  /*
    Apply Threshold() channel-by-channel.  NOTE(review): opacity is the
    odd one out -- it is subtracted from QuantumRange, i.e. computed
    inverted relative to the color channels; longstanding behavior,
    preserved as-is.
  */
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}
static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method. See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))

    Arguments are alpha-premultiplied channel values (Sca, Dca) and their
    alphas (Sa, Da).  The first test short-circuits the degenerate cases
    that would otherwise feed the divisions below.
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dst_alpha,
    scale,
    src_alpha;

  /*
    Blend source over destination using the VividLight method.
  */
  src_alpha=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  dst_alpha=1.0-QuantumScale*q->opacity;
  /* over blend, as per SVG doc */
  scale=RoundToUnity(src_alpha+dst_alpha-src_alpha*dst_alpha);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  scale=QuantumRange/(fabs(scale) <= MagickEpsilon ? 1.0 : scale);
  composite->red=scale*VividLight(QuantumScale*p->red*src_alpha,src_alpha,
    QuantumScale*q->red*dst_alpha,dst_alpha);
  composite->green=scale*VividLight(QuantumScale*p->green*src_alpha,src_alpha,
    QuantumScale*q->green*dst_alpha,dst_alpha);
  composite->blue=scale*VividLight(QuantumScale*p->blue*src_alpha,src_alpha,
    QuantumScale*q->blue*dst_alpha,dst_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*VividLight(QuantumScale*p->index*src_alpha,
      src_alpha,QuantumScale*q->index*dst_alpha,dst_alpha);
}
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  MagickRealType
    dst_term,
    src_term;

  /*
    Duff-Porter XOR: each input survives only where the other is absent.
  */
  src_term=Sca*(1-Da);
  dst_term=Dca*(1-Sa);
  return(src_term+dst_term);
}
static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dst_alpha,
    scale,
    src_alpha;

  /*
    Duff-Porter XOR composition of two pixels.
  */
  src_alpha=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  dst_alpha=1.0-QuantumScale*q->opacity;
  /* Xor blend mode X=0,Y=1,Z=1 */
  scale=src_alpha+dst_alpha-2*src_alpha*dst_alpha;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  scale=1.0/(fabs(scale) <= MagickEpsilon ? 1.0 : scale);
  composite->red=scale*Xor(p->red*src_alpha,src_alpha,q->red*dst_alpha,
    dst_alpha);
  composite->green=scale*Xor(p->green*src_alpha,src_alpha,q->green*dst_alpha,
    dst_alpha);
  composite->blue=scale*Xor(p->blue*src_alpha,src_alpha,q->blue*dst_alpha,
    dst_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*Xor(p->index*src_alpha,src_alpha,q->index*dst_alpha,
      dst_alpha);
}
static void HSBComposite(const double hue,const double saturation,
  const double brightness,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    frac,
    hue6,
    pv,
    qv,
    tv;

  /*
    Convert an HSB triplet to RGB channel values scaled by QuantumRange.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  if (saturation == 0.0)
    {
      /*
        Achromatic: every channel carries the brightness.
      */
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(*red);
      *blue=(*red);
      return;
    }
  hue6=6.0*(hue-floor(hue));       /* hue scaled into [0,6) */
  frac=hue6-floor((double) hue6);  /* position within the sextant */
  pv=brightness*(1.0-saturation);
  qv=brightness*(1.0-saturation*frac);
  tv=brightness*(1.0-saturation*(1.0-frac));
  switch ((int) hue6)
  {
    case 0:
    default:
    {
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(MagickRealType) QuantumRange*tv;
      *blue=(MagickRealType) QuantumRange*pv;
      break;
    }
    case 1:
    {
      *red=(MagickRealType) QuantumRange*qv;
      *green=(MagickRealType) QuantumRange*brightness;
      *blue=(MagickRealType) QuantumRange*pv;
      break;
    }
    case 2:
    {
      *red=(MagickRealType) QuantumRange*pv;
      *green=(MagickRealType) QuantumRange*brightness;
      *blue=(MagickRealType) QuantumRange*tv;
      break;
    }
    case 3:
    {
      *red=(MagickRealType) QuantumRange*pv;
      *green=(MagickRealType) QuantumRange*qv;
      *blue=(MagickRealType) QuantumRange*brightness;
      break;
    }
    case 4:
    {
      *red=(MagickRealType) QuantumRange*tv;
      *green=(MagickRealType) QuantumRange*pv;
      *blue=(MagickRealType) QuantumRange*brightness;
      break;
    }
    case 5:
    {
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(MagickRealType) QuantumRange*pv;
      *blue=(MagickRealType) QuantumRange*qv;
      break;
    }
  }
}
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *composite_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  /*
    Convenience wrapper: composite over the default channel set.
  */
  return(CompositeImageChannel(image,DefaultChannels,compose,composite_image,
    x_offset,y_offset));
}
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
const ChannelType channel,const CompositeOperator compose,
const Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag "Composite/Image"
CacheView
*composite_view,
*image_view;
const char
*value;
double
sans;
ExceptionInfo
*exception;
GeometryInfo
geometry_info;
Image
*destination_image;
MagickBooleanType
modify_outside_overlay,
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
MagickRealType
amount,
destination_dissolve,
midpoint,
percent_brightness,
percent_saturation,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
/*
Prepare composite image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite_image != (Image *) NULL);
assert(composite_image->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&zero);
destination_image=(Image *) NULL;
amount=0.5;
destination_dissolve=1.0;
modify_outside_overlay=MagickFalse;
percent_brightness=100.0;
percent_saturation=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case ClearCompositeOp:
case SrcCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
case OutCompositeOp:
case SrcOutCompositeOp:
case DstInCompositeOp:
case DstAtopCompositeOp:
{
/*
Modify destination outside the overlaid region.
*/
modify_outside_overlay=MagickTrue;
break;
}
case OverCompositeOp:
{
if (image->matte != MagickFalse)
break;
if (composite_image->matte != MagickFalse)
break;
}
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) composite_image->columns) >= (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) composite_image->rows) >= (ssize_t) image->rows)
break;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
composite_view=AcquireCacheView(composite_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) composite_image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*composite_indexes;
register const PixelPacket
*p;
register IndexPacket
*indexes;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
1,exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
composite_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
composite_indexes=GetCacheViewVirtualIndexQueue(composite_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
(void) CopyMagickMemory(q,p,composite_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(composite_indexes != (const IndexPacket *) NULL))
(void) CopyMagickMemory(indexes,composite_indexes,
composite_image->columns*sizeof(*indexes));
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
composite_view=DestroyCacheView(composite_view);
image_view=DestroyCacheView(image_view);
return(status);
}
case CopyOpacityCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify destination outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
modify_outside_overlay=MagickTrue;
break;
}
case BlurCompositeOp:
{
CacheView
*composite_view,
*destination_view;
MagickPixelPacket
pixel;
MagickRealType
angle_range,
angle_start,
height,
width;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
&image->exception);
if (destination_image == (Image *) NULL)
return(MagickFalse);
/*
Determine the horizontal and vertical maximim blur.
*/
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0 )
{
destination_image=DestroyImage(destination_image);
return(MagickFalse);
}
width=geometry_info.rho;
height=geometry_info.sigma;
blur.x1=geometry_info.rho;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=geometry_info.sigma;
angle_start=0.0;
angle_range=0.0;
if ((flags & HeightValue) == 0)
blur.y2=blur.x1;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Blur Image by resampling.
*/
pixel=zero;
exception=(&image->exception);
resample_filter=AcquireResampleFilter(image,&image->exception);
SetResampleFilter(resample_filter,CubicFilter,2.0);
destination_view=AcquireCacheView(destination_image);
composite_view=AcquireCacheView(composite_image);
for (y=0; y < (ssize_t) composite_image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*restrict p;
register PixelPacket
*restrict r;
register IndexPacket
*restrict destination_indexes;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
1,exception);
r=QueueCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
break;
destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
for (x=0; x < (ssize_t) composite_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p++;
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,blur.x1*QuantumScale*
GetPixelRed(p),blur.y1*QuantumScale*
GetPixelGreen(p),blur.x2*QuantumScale*
GetPixelRed(p),blur.y2*QuantumScale*
GetPixelGreen(p));
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel);
SetPixelPacket(destination_image,&pixel,r,destination_indexes+x);
p++;
r++;
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
composite_view=DestroyCacheView(composite_view);
destination_view=DestroyCacheView(destination_view);
composite_image=destination_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*composite_view,
*destination_view,
*image_view;
MagickPixelPacket
pixel;
MagickRealType
horizontal_scale,
vertical_scale;
PointInfo
center,
offset;
register IndexPacket
*restrict destination_indexes;
register PixelPacket
*restrict r;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
&image->exception);
if (destination_image == (Image *) NULL)
return(MagickFalse);
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue|HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (composite_image->columns-1.0)/
2.0;
vertical_scale=(MagickRealType) (composite_image->rows-1.0)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1.0)/2.0;
vertical_scale=(MagickRealType) (image->rows-1.0)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(composite_image->columns-1.0)/200.0;
vertical_scale*=(composite_image->rows-1.0)/200.0;
}
else
{
horizontal_scale*=(image->columns-1.0)/200.0;
vertical_scale*=(image->rows-1.0)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) == 0)
center.x=(MagickRealType) x_offset+(composite_image->columns-1)/
2.0;
else
center.x=((MagickRealType) image->columns-1)/2.0;
else
if ((flags & AspectValue) == 0)
center.x=(MagickRealType) x_offset+geometry_info.xi;
else
center.x=geometry_info.xi;
if ((flags & YValue) == 0)
if ((flags & AspectValue) == 0)
center.y=(MagickRealType) y_offset+(composite_image->rows-1)/2.0;
else
center.y=((MagickRealType) image->rows-1)/2.0;
else
if ((flags & AspectValue) == 0)
center.y=(MagickRealType) y_offset+geometry_info.psi;
else
center.y=geometry_info.psi;
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
pixel=zero;
exception=(&image->exception);
image_view=AcquireCacheView(image);
destination_view=AcquireCacheView(destination_image);
composite_view=AcquireCacheView(composite_image);
for (y=0; y < (ssize_t) composite_image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
1,exception);
r=QueueCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
break;
destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
for (x=0; x < (ssize_t) composite_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p++;
continue;
}
/*
Displace the offset.
*/
offset.x=(horizontal_scale*(GetPixelRed(p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(vertical_scale*(GetPixelGreen(p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p)));
SetPixelPacket(destination_image,&pixel,r,destination_indexes+x);
p++;
r++;
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
break;
}
destination_view=DestroyCacheView(destination_view);
composite_view=DestroyCacheView(composite_view);
image_view=DestroyCacheView(image_view);
composite_image=destination_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
destination_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
destination_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
destination_dissolve=geometry_info.sigma/100.0;
if ((destination_dissolve-MagickEpsilon) < 0.0)
destination_dissolve=0.0;
modify_outside_overlay=MagickTrue;
if ((destination_dissolve+MagickEpsilon) > 1.0 )
{
destination_dissolve=1.0;
modify_outside_overlay=MagickFalse;
}
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
destination_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
destination_dissolve=geometry_info.sigma/100.0;
modify_outside_overlay=MagickTrue;
if ((destination_dissolve+MagickEpsilon) > 1.0)
modify_outside_overlay=MagickFalse;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the brightness and saturation scale.
*/
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_brightness=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_saturation=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
This Composition method is depreciated
*/
value=GetImageArtifact(composite_image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
value=GetImageArtifact(composite_image,"compose:outside-overlay");
if (value != (const char *) NULL)
modify_outside_overlay=IsMagickTrue(value);
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
GetMagickPixelPacket(composite_image,&zero);
exception=(&image->exception);
image_view=AcquireCacheView(image);
composite_view=AcquireCacheView(composite_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*pixels;
double
brightness,
hue,
saturation;
MagickPixelPacket
composite,
destination,
source;
register const IndexPacket
*restrict composite_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
if (modify_outside_overlay == MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) composite_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(PixelPacket *) NULL;
p=(PixelPacket *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows))
{
p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset,
composite_image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset;
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
composite_indexes=GetCacheViewVirtualIndexQueue(composite_view);
source=zero;
destination=zero;
hue=0.0;
saturation=0.0;
brightness=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (modify_outside_overlay == MagickFalse)
{
if (x < x_offset)
{
q++;
continue;
}
if ((x-x_offset) >= (ssize_t) composite_image->columns)
break;
}
destination.red=(MagickRealType) GetPixelRed(q);
destination.green=(MagickRealType) GetPixelGreen(q);
destination.blue=(MagickRealType) GetPixelBlue(q);
if (image->matte != MagickFalse)
destination.opacity=(MagickRealType) GetPixelOpacity(q);
if (image->colorspace == CMYKColorspace)
destination.index=(MagickRealType) GetPixelIndex(indexes+x);
if (image->colorspace == CMYKColorspace)
{
destination.red=(MagickRealType) QuantumRange-destination.red;
destination.green=(MagickRealType) QuantumRange-destination.green;
destination.blue=(MagickRealType) QuantumRange-destination.blue;
destination.index=(MagickRealType) QuantumRange-destination.index;
}
/*
Handle destination modifications outside overlaid region.
*/
composite=destination;
if ((pixels == (PixelPacket *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) composite_image->columns))
{
switch (compose)
{
case DissolveCompositeOp:
case BlendCompositeOp:
{
composite.opacity=(MagickRealType) (QuantumRange-
destination_dissolve*(QuantumRange-composite.opacity));
break;
}
case ClearCompositeOp:
case SrcCompositeOp:
{
CompositeClear(&destination,&composite);
break;
}
case InCompositeOp:
case SrcInCompositeOp:
case OutCompositeOp:
case SrcOutCompositeOp:
case DstInCompositeOp:
case DstAtopCompositeOp:
case CopyOpacityCompositeOp:
case ChangeMaskCompositeOp:
{
composite.opacity=(MagickRealType) TransparentOpacity;
break;
}
default:
{
(void) GetOneVirtualMagickPixel(composite_image,x-x_offset,
y-y_offset,&composite,exception);
break;
}
}
if (image->colorspace == CMYKColorspace)
{
composite.red=(MagickRealType) QuantumRange-composite.red;
composite.green=(MagickRealType) QuantumRange-composite.green;
composite.blue=(MagickRealType) QuantumRange-composite.blue;
composite.index=(MagickRealType) QuantumRange-composite.index;
}
SetPixelRed(q,ClampToQuantum(composite.red));
SetPixelGreen(q,ClampToQuantum(composite.green));
SetPixelBlue(q,ClampToQuantum(composite.blue));
if (image->matte != MagickFalse)
SetPixelOpacity(q,ClampToQuantum(composite.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,ClampToQuantum(composite.index));
q++;
continue;
}
/*
Handle normal overlay of source onto destination.
*/
source.red=(MagickRealType) GetPixelRed(p);
source.green=(MagickRealType) GetPixelGreen(p);
source.blue=(MagickRealType) GetPixelBlue(p);
if (composite_image->matte != MagickFalse)
source.opacity=(MagickRealType) GetPixelOpacity(p);
if (composite_image->colorspace == CMYKColorspace)
source.index=(MagickRealType) GetPixelIndex(composite_indexes+
x-x_offset);
if (composite_image->colorspace == CMYKColorspace)
{
source.red=(MagickRealType) QuantumRange-source.red;
source.green=(MagickRealType) QuantumRange-source.green;
source.blue=(MagickRealType) QuantumRange-source.blue;
source.index=(MagickRealType) QuantumRange-source.index;
}
switch (compose)
{
/* Duff-Porter Compositions */
case ClearCompositeOp:
{
CompositeClear(&destination,&composite);
break;
}
case SrcCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
{
composite=source;
break;
}
case NoCompositeOp:
case DstCompositeOp:
break;
case OverCompositeOp:
case SrcOverCompositeOp:
{
MagickPixelCompositeOver(&source,source.opacity,&destination,
destination.opacity,&composite);
break;
}
case DstOverCompositeOp:
{
MagickPixelCompositeOver(&destination,destination.opacity,&source,
source.opacity,&composite);
break;
}
case SrcInCompositeOp:
case InCompositeOp:
{
CompositeIn(&source,&destination,&composite);
break;
}
case DstInCompositeOp:
{
CompositeIn(&destination,&source,&composite);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
CompositeOut(&source,&destination,&composite);
break;
}
case DstOutCompositeOp:
{
CompositeOut(&destination,&source,&composite);
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
CompositeAtop(&source,&destination,&composite);
break;
}
case DstAtopCompositeOp:
{
CompositeAtop(&destination,&source,&composite);
break;
}
case XorCompositeOp:
{
CompositeXor(&source,&destination,&composite);
break;
}
/* Mathematical Compositions */
case PlusCompositeOp:
{
CompositePlus(&source,&destination,channel,&composite);
break;
}
case MinusDstCompositeOp:
{
CompositeMinus(&source,&destination,channel,&composite);
break;
}
case MinusSrcCompositeOp:
{
CompositeMinus(&destination,&source,channel,&composite);
break;
}
case ModulusAddCompositeOp:
{
CompositeModulusAdd(&source,&destination,channel,&composite);
break;
}
case ModulusSubtractCompositeOp:
{
CompositeModulusSubtract(&source,&destination,channel,&composite);
break;
}
case DifferenceCompositeOp:
{
CompositeDifference(&source,&destination,channel,&composite);
break;
}
case ExclusionCompositeOp:
{
CompositeExclusion(&source,&destination,channel,&composite);
break;
}
case MultiplyCompositeOp:
{
CompositeMultiply(&source,&destination,channel,&composite);
break;
}
case ScreenCompositeOp:
{
CompositeScreen(&source,&destination,channel,&composite);
break;
}
case DivideDstCompositeOp:
{
CompositeDivide(&source,&destination,channel,&composite);
break;
}
case DivideSrcCompositeOp:
{
CompositeDivide(&destination,&source,channel,&composite);
break;
}
case DarkenCompositeOp:
{
CompositeDarken(&source,&destination,channel,&composite);
break;
}
case LightenCompositeOp:
{
CompositeLighten(&source,&destination,channel,&composite);
break;
}
case DarkenIntensityCompositeOp:
{
CompositeDarkenIntensity(&source,&destination,channel,&composite);
break;
}
case LightenIntensityCompositeOp:
{
CompositeLightenIntensity(&source,&destination,channel,&composite);
break;
}
case MathematicsCompositeOp:
{
CompositeMathematics(&source,&destination,channel,&geometry_info,
&composite);
break;
}
/* Lighting Compositions */
case ColorDodgeCompositeOp:
{
CompositeColorDodge(&source,&destination,&composite);
break;
}
case ColorBurnCompositeOp:
{
CompositeColorBurn(&source,&destination,&composite);
break;
}
case LinearDodgeCompositeOp:
{
CompositeLinearDodge(&source,&destination,&composite);
break;
}
case LinearBurnCompositeOp:
{
CompositeLinearBurn(&source,&destination,&composite);
break;
}
case HardLightCompositeOp:
{
CompositeHardLight(&source,&destination,&composite);
break;
}
case OverlayCompositeOp:
{
/* Overlay = Reversed HardLight. */
CompositeHardLight(&destination,&source,&composite);
break;
}
case SoftLightCompositeOp:
{
CompositeSoftLight(&source,&destination,&composite);
break;
}
case LinearLightCompositeOp:
{
CompositeLinearLight(&source,&destination,&composite);
break;
}
case PegtopLightCompositeOp:
{
CompositePegtopLight(&source,&destination,&composite);
break;
}
case VividLightCompositeOp:
{
CompositeVividLight(&source,&destination,&composite);
break;
}
case PinLightCompositeOp:
{
CompositePinLight(&source,&destination,&composite);
break;
}
/* Other Composition */
case ChangeMaskCompositeOp:
{
if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) ||
(IsMagickColorSimilar(&source,&destination) != MagickFalse))
composite.opacity=(MagickRealType) TransparentOpacity;
else
composite.opacity=(MagickRealType) OpaqueOpacity;
break;
}
case BumpmapCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
CompositeBumpmap(&source,&destination,&composite);
break;
}
case DissolveCompositeOp:
{
MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange-
source_dissolve*(QuantumRange-source.opacity)),&destination,
(MagickRealType) (QuantumRange-destination_dissolve*(QuantumRange-
destination.opacity)),&composite);
break;
}
case BlendCompositeOp:
{
MagickPixelCompositeBlend(&source,source_dissolve,&destination,
destination_dissolve,&composite);
break;
}
case ThresholdCompositeOp:
{
CompositeThreshold(&source,&destination,threshold,amount,&composite);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (source.opacity == TransparentOpacity)
break;
offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint);
if (offset == 0)
break;
CompositeHSB(destination.red,destination.green,destination.blue,&hue,
&saturation,&brightness);
brightness+=(0.01*percent_brightness*offset)/midpoint;
saturation*=0.01*percent_saturation;
HSBComposite(hue,saturation,brightness,&composite.red,
&composite.green,&composite.blue);
break;
}
case HueCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (destination.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHSB(destination.red,destination.green,destination.blue,&hue,
&saturation,&brightness);
CompositeHSB(source.red,source.green,source.blue,&hue,&sans,&sans);
HSBComposite(hue,saturation,brightness,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < destination.opacity)
composite.opacity=source.opacity;
break;
}
case SaturateCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (destination.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHSB(destination.red,destination.green,destination.blue,&hue,
&saturation,&brightness);
CompositeHSB(source.red,source.green,source.blue,&sans,&saturation,
&sans);
HSBComposite(hue,saturation,brightness,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < destination.opacity)
composite.opacity=source.opacity;
break;
}
case LuminizeCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (destination.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHSB(destination.red,destination.green,destination.blue,&hue,
&saturation,&brightness);
CompositeHSB(source.red,source.green,source.blue,&sans,&sans,
&brightness);
HSBComposite(hue,saturation,brightness,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < destination.opacity)
composite.opacity=source.opacity;
break;
}
case ColorizeCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (destination.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHSB(destination.red,destination.green,destination.blue,&sans,
&sans,&brightness);
CompositeHSB(source.red,source.green,source.blue,&hue,&saturation,
&sans);
HSBComposite(hue,saturation,brightness,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < destination.opacity)
composite.opacity=source.opacity;
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
composite.red=source.red;
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
composite.green=source.green;
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
composite.blue=source.blue;
break;
}
case CopyOpacityCompositeOp:
{
if (source.matte == MagickFalse)
{
composite.opacity=(MagickRealType) (QuantumRange-
MagickPixelIntensityToQuantum(&source));
break;
}
composite.opacity=source.opacity;
break;
}
case CopyBlackCompositeOp:
{
if (source.colorspace != CMYKColorspace)
ConvertRGBToCMYK(&source);
composite.index=source.index;
break;
}
/* compose methods that are already handled */
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
composite=source;
break;
}
default:
break;
}
if (image->colorspace == CMYKColorspace)
{
composite.red=(MagickRealType) QuantumRange-composite.red;
composite.green=(MagickRealType) QuantumRange-composite.green;
composite.blue=(MagickRealType) QuantumRange-composite.blue;
composite.index=(MagickRealType) QuantumRange-composite.index;
}
SetPixelRed(q,ClampToQuantum(composite.red));
SetPixelGreen(q,ClampToQuantum(composite.green));
SetPixelBlue(q,ClampToQuantum(composite.blue));
SetPixelOpacity(q,ClampToQuantum(composite.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,ClampToQuantum(composite.index));
p++;
if (p >= (pixels+composite_image->columns))
p=pixels;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImageChannel)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
composite_view=DestroyCacheView(composite_view);
image_view=DestroyCacheView(image_view);
if (destination_image != (Image * ) NULL)
destination_image=DestroyImage(destination_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
/*
Make the texture tile infinitely when sampled past its edges.
NOTE(review): texture is declared const but this call mutates its
virtual-pixel method -- confirm whether a cast is intended upstream.
*/
(void) SetImageVirtualPixelMethod(texture,TileVirtualPixelMethod);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
/*
Two strategies: when blending is actually required (non-Copy compose,
or either image carries an alpha channel), tile via full CompositeImage
calls; otherwise fall through to the raw-memcpy fast path below.
*/
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
(texture->matte != MagickFalse)))
{
/*
Tile texture onto the image background.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture->rows)
{
register ssize_t
x;
/* cannot break out of an OpenMP loop; skip remaining rows on error */
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns)
{
MagickBooleanType
thread_status;
/* composite one texture tile at (x,y), honoring the tile offset */
thread_status=CompositeImage(image,image->compose,texture,x+
texture->tile_offset.x,y+texture->tile_offset.y);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TextureImage)
#endif
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
texture_view=AcquireCacheView(texture);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*texture_indexes;
register const PixelPacket
*p;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
size_t
width;
if (status == MagickFalse)
continue;
/* fetch one texture row, wrapping vertically via % texture->rows */
p=GetCacheViewVirtualPixels(texture_view,texture->tile_offset.x,(y+
texture->tile_offset.y) % texture->rows,texture->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
/* blast the texture row across the image row, one tile per iteration */
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns)
{
width=texture->columns;
/* the last tile may be clipped at the right edge of the image */
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
(void) CopyMagickMemory(q,p,width*sizeof(*p));
/* CMYK images keep the black channel in the index queue; copy it
only when BOTH images are CMYK so indexes line up */
if ((image->colorspace == CMYKColorspace) &&
(texture->colorspace == CMYKColorspace))
{
(void) CopyMagickMemory(indexes,texture_indexes,width*
sizeof(*indexes));
indexes+=width;
}
q+=width;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TextureImage)
#endif
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
return(status);
}
|
target_data-6.c | /* { dg-do run } */
/* { dg-require-effective-target offload_device_nonshared_as } */
#include <stdlib.h>
#include <omp.h>
#define EPS 0.000001
#define THRESHOLD 1000
const int MAX = 1800;
/* Abort unless a[] and b[] agree element-wise to within EPS.  */
void check (float *a, float *b, int N)
{
  int idx;
  for (idx = 0; idx < N; idx++)
    {
      float diff = a[idx] - b[idx];
      if (diff > EPS || -diff > EPS)
	abort ();
    }
}
/* Fill a1 with the alternating sequence -1, +1, -1, ... and a2 with
   the index sequence 0, 1, 2, ...  */
void init (float *a1, float *a2, int N)
{
  int i;
  float sign = -1;
  for (i = 0; i < N; i++)
    {
      a1[i] = sign;
      a2[i] = i;
      sign = -sign;
    }
}
/* Second data set: a1 alternates -10, +10, -10, ... while a2 is again
   the index sequence 0, 1, 2, ...  */
void init_again (float *a1, float *a2, int N)
{
  int i;
  float sign = -1;
  for (i = 0; i < N; i++)
    {
      a1[i] = sign * 10;
      a2[i] = i;
      sign = -sign;
    }
}
/* Host-only reference computation: p = v1*v2 for the first data set,
   then accumulate v1*v2 for the second data set on top of it.  */
void vec_mult_ref (float *p, float *v1, float *v2, int N)
{
  int i;
  init (v1, v2, N);
  for (i = 0; i < N; i++)
    p[i] = v1[i] * v2[i];
  init_again (v1, v2, N);
  for (i = 0; i < N; i++)
    p[i] += v1[i] * v2[i];
}
/* Offloaded computation mirroring vec_mult_ref: p stays resident on the
   device for the whole "target data" region and is copied back to the
   host only when that region ends.  Each "target" region re-maps the
   freshly (re)initialized v1/v2 to the device.  */
void vec_mult (float *p, float *v1, float *v2, int N)
{
  int i;
  init (v1, v2, N);
#pragma omp target data if(N > THRESHOLD) map(from: p[0:N])
  {
#pragma omp target if (N > THRESHOLD) map(to: v1[:N], v2[:N])
    {
      /* Bug fix: this originally read "abort;" -- an expression
	 statement that names the function but never calls it, so the
	 offload check silently did nothing.  The parentheses make it
	 an actual call, matching the second target region below.  */
      if (omp_is_initial_device ())
	abort ();
#pragma omp parallel for
      for (i = 0; i < N; i++)
	p[i] = v1[i] * v2[i];
    }
    init_again (v1, v2, N);
#pragma omp target if (N > THRESHOLD) map(to: v1[:N], v2[:N])
    {
      if (omp_is_initial_device ())
	abort ();
#pragma omp parallel for
      for (i = 0; i < N; i++)
	p[i] = p[i] + (v1[i] * v2[i]);
    }
  }
}
/* Driver: compute the product-accumulate on the host (reference) and via
   the offloaded path, then verify the two agree.  Returns 0 on success;
   mismatches and failures abort.  */
int main ()
{
  float *p1 = (float *) malloc (MAX * sizeof (float));
  float *p2 = (float *) malloc (MAX * sizeof (float));
  float *v1 = (float *) malloc (MAX * sizeof (float));
  float *v2 = (float *) malloc (MAX * sizeof (float));
  /* Robustness fix: the original dereferenced these unchecked.  */
  if (p1 == NULL || p2 == NULL || v1 == NULL || v2 == NULL)
    abort ();
  vec_mult_ref (p1, v1, v2, MAX);
  vec_mult (p2, v1, v2, MAX);
  check (p1, p2, MAX);
  free (p1);
  free (p2);
  free (v1);
  free (v2);
  return 0;
}
|
GB_binop__le_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool)
// A*D function (colscale): GB (_AxD__le_bool)
// D*A function (rowscale): GB (_DxB__le_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__le_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__le_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool)
// C=scalar+B GB (_bind1st__le_bool)
// C=scalar+B' GB (_bind1st_tran__le_bool)
// C=A+scalar GB (_bind2nd__le_bool)
// C=A'+scalar GB (_bind2nd_tran__le_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B (eWiseAdd) where all three matrices are dense; cij = (aij <= bij).
#if GB_DISABLE
// this operator was compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B: accumulate sparse B into dense C with the LE_BOOL operator,
// using the precomputed task slicing of B for parallelism.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into
// every entry of the dense matrix C.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first
// (harmless artifact of the code generator).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the LE_BOOL operator;
// the template handles all sparsity formats and mask variants.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspaces for slicing entries/vectors across threads
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult (method 02): C<#M>=A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for LE_BOOL (see file-scope macro),
// so the flipxy argument needs no special handling here.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult (method 04): C<M>=A.*B where M is sparse/hyper and both
// A and B are bitmap/full; work is sliced over M's entries.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult: C=A.*B (optionally masked) where the result C is bitmap.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Cx [p] = (x <= Bx [p]) for all bnz entries, with the scalar x bound as
// the first operand; Bb is B's bitmap (NULL when B is full).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present in the bitmap
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Cx [p] = (Ax [p] <= y) for all anz entries, with the scalar y bound as
// the second operand; Ab is A's bitmap (NULL when A is full).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying cij = (x <= aij), with the
// scalar x bound as the first operand (see GB_CAST_OP defined above).
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying cij = (aij <= y), with the
// scalar y bound as the second operand (see GB_CAST_OP defined above).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_int8
// A.*B function (eWiseMult): GB_AemultB__lor_int8
// A*D function (colscale): GB_AxD__lor_int8
// D*A function (rowscale): GB_DxB__lor_int8
// C+=B function (dense accum): GB_Cdense_accumB__lor_int8
// C+=b function (dense accum): GB_Cdense_accumb__lor_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int8
// C=scalar+B GB_bind1st__lor_int8
// C=scalar+B' GB_bind1st_tran__lor_int8
// C=A+scalar GB_bind2nd__lor_int8
// C=A'+scalar GB_bind2nd_tran__lor_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) || (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B (eWiseAdd), all three dense; cij = ((aij != 0) || (bij != 0)).
#if GB_DISABLE
// this operator was compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
// C += B: accumulate sparse B into dense C with the LOR_INT8 operator,
// using the precomputed k/p slices to partition B across tasks.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lor_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into
// every entry of the dense matrix C.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first
// (harmless artifact of the code generator).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
// eWiseAdd: C=A+B or C<M>=A+B with the LOR_INT8 operator.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with ".*" being LOR on int8_t.
// Body supplied by GB_emult_template.c.
GrB_Info GB_AemultB__lor_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = lor (x, Bx [p]) for all p: apply the LOR binary operator with
// the scalar x bound as the first input. Cx and Bx may alias; the operator
// reads Bx [p] before writing Cx [p], so aliasing is safe.
GrB_Info GB_bind1st__lor_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
const int8_t *Bx = (const int8_t *) Bx_input ;
const int8_t x = (*((const int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = ((x != 0) || (Bx [p] != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = lor (Ax [p], y) for all p: apply the LOR binary operator with
// the scalar y bound as the second input. Cx and Ax may alias safely.
GrB_Info GB_bind2nd__lor_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
const int8_t *Ax = (const int8_t *) Ax_input ;
const int8_t y = (*((const int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = ((Ax [p] != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: for each entry of A
// it computes Cx [pC] = lor (x, Ax [pA]) while transposing the pattern.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with scalar x bound first.
GrB_Info GB_bind1st_tran__lor_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP consumed by GB_unop_transpose.c: Cx [pC] = lor (Ax [pA], y)
// while transposing the pattern of A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with scalar y bound second.
GrB_Info GB_bind2nd_tran__lor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bpmfutils.h | #pragma once
#include <algorithm>
#include <chrono>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include "sparsetensor.h"
// Current wall-clock time in (fractional) seconds since the clock's epoch.
inline double tick() {
    using namespace std::chrono;
    const auto since_epoch = high_resolution_clock::now().time_since_epoch();
    return duration_cast<duration<double>>(since_epoch).count();
}
// Clamp x into the closed interval [min, max].
// Assumes min <= max; if min > max the result follows the first matching
// guard (min wins), same as the original ternary.
inline double clamp(double x, double min, double max) {
    if (x < min) return min;
    if (x > max) return max;
    return x;
}
// Scan all stored (non-zero) entries of a sparse matrix and return
// (smallest value, largest value). For an empty matrix this yields
// (INFINITY, -INFINITY), exactly like the original element-wise scan.
inline std::pair<double, double> getMinMax(const Eigen::SparseMatrix<double> &mat) {
    double lo = INFINITY;
    double hi = -INFINITY;
    for (int k = 0; k < mat.outerSize(); ++k) {
        for (Eigen::SparseMatrix<double>::InnerIterator it(mat, k); it; ++it) {
            const double v = it.value();
            lo = std::min(lo, v);
            hi = std::max(hi, v);
        }
    }
    return std::make_pair(lo, hi);
}
// Partition num_latent units of work across num_nodes entries of work[].
// Each node first gets a multiple of work_unit (2 when the average load is
// at least 2, else 1); the remainder is then dealt out round-robin in
// work_unit-sized chunks starting from node 0.
inline void split_work_mpi(int num_latent, int num_nodes, int* work) {
    const double avg_work = num_latent / (double) num_nodes;
    const int work_unit = (avg_work >= 2.0) ? 2 : 1;
    const int min_work = work_unit * (int)floor(avg_work / work_unit);
    int work_left = num_latent;
    for (int i = 0; i < num_nodes; i++) {
        work[i] = min_work;
        work_left -= min_work;
    }
    for (int i = 0; work_left > 0; i = (i + 1) % num_nodes) {
        const int take = std::min(work_left, work_unit);
        work[i] += take;
        work_left -= take;
    }
}
// Fill sparse matrix X from COO arrays: X(rows[n], cols[n]) = values[n]
// for n in [0, N). Entries are collected as triplets and handed to
// setFromTriplets (which sums duplicates, per Eigen's documented behavior).
inline void sparseFromIJV(Eigen::SparseMatrix<double> &X, int* rows, int* cols, double* values, int N) {
    std::vector<Eigen::Triplet<double>> entries;
    entries.reserve(N);
    for (int n = 0; n < N; n++) {
        entries.emplace_back(rows[n], cols[n], values[n]);
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Fill sparse matrix X from an N x 2 index matrix (column 0 = row index,
// column 1 = column index) and a length-N value vector.
// Throws std::runtime_error on shape mismatch, same as before.
inline void sparseFromIJV(Eigen::SparseMatrix<double> &X, Eigen::MatrixXi &idx, Eigen::VectorXd &values) {
    if (idx.rows() != values.size()) {
        throw std::runtime_error("sparseFromIJV: idx.rows() must equal values.size().");
    }
    if (idx.cols() != 2) {
        throw std::runtime_error("sparseFromIJV: idx.cols() must be equal to 2.");
    }
    const int N = values.size();
    std::vector<Eigen::Triplet<double>> entries;
    entries.reserve(N);
    for (int n = 0; n < N; n++) {
        entries.emplace_back(idx(n, 0), idx(n, 1), values(n));
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Return x squared.
inline double square(double x) {
    return x * x;
}
// Evaluate predictions against the sparse test set P after sampling epoch n.
// For every stored entry of P the prediction is the dot product of the
// corresponding item (sample_m) and user (sample_u) latent columns plus
// mean_rating. Also maintains, in place, the running mean of predictions
// across epochs (predictions) and the unnormalized running variance
// (predictions_var), via Welford's online update (see link below).
// Returns (rmse of this epoch's predictions, rmse of the running average).
// NOTE(review): assumes predictions/predictions_var have at least
// P.nonZeros() elements and are zero-initialized before epoch 0 — confirm
// at the call site.
inline std::pair<double,double> eval_rmse(Eigen::SparseMatrix<double> & P, const int n, Eigen::VectorXd & predictions, Eigen::VectorXd & predictions_var, const Eigen::MatrixXd &sample_m, const Eigen::MatrixXd &sample_u, double mean_rating)
{
double se = 0.0, se_avg = 0.0;
// Each outer index k owns the disjoint slice [outerIndexPtr()[k], ...) of
// the prediction vectors, so parallel iterations never write the same idx.
#pragma omp parallel for schedule(dynamic,8) reduction(+:se, se_avg)
for (int k = 0; k < P.outerSize(); ++k) {
int idx = P.outerIndexPtr()[k];
for (Eigen::SparseMatrix<double>::InnerIterator it(P,k); it; ++it) {
const double pred = sample_m.col(it.col()).dot(sample_u.col(it.row())) + mean_rating;
se += square(it.value() - pred);
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
double pred_avg;
if (n == 0) {
// first epoch: the running mean is just this prediction
pred_avg = pred;
} else {
double delta = pred - predictions[idx];
pred_avg = (predictions[idx] + delta / (n + 1));
predictions_var[idx] += delta * (pred - pred_avg);
}
se_avg += square(it.value() - pred_avg);
predictions[idx++] = pred_avg;
}
}
const unsigned N = P.nonZeros();
const double rmse = sqrt( se / N );
const double rmse_avg = sqrt( se_avg / N );
return std::make_pair(rmse, rmse_avg);
}
std::pair<double,double> eval_rmse_tensor(
SparseMode & sparseMode,
const int Nepoch,
Eigen::VectorXd & predictions,
Eigen::VectorXd & predictions_var,
std::vector< std::unique_ptr<Eigen::MatrixXd> > & samples,
double mean_value);
// Compute per-row mean and (population) variance of X across its columns.
// X is D x N: rows are dimensions, columns are samples. mean and var are
// resized to length D and overwritten; var is the sum of squared deviations
// divided by N (biased estimator). Two OpenMP passes — one for the mean,
// one for the deviations — each thread accumulating into a private vector
// merged under a critical section.
// Fix: X is now taken by const reference; it was previously passed by
// value, copying the entire D x N matrix on every call. Call sites are
// unchanged.
inline void row_mean_var(Eigen::VectorXd & mean, Eigen::VectorXd & var, const Eigen::MatrixXd &X) {
const int N = X.cols();
const int D = X.rows();
mean.resize(D);
var.resize(D);
mean.setZero();
var.setZero();
#pragma omp parallel
{
// thread-private accumulator avoids contention on 'mean'
Eigen::VectorXd tmp(D);
tmp.setZero();
#pragma omp for schedule(static)
for (int i = 0; i < N; i++) {
for (int d = 0; d < D; d++) {
tmp(d) += X(d, i);
}
}
#pragma omp critical
{
mean += tmp;
}
}
// computing mean
mean /= N;
#pragma omp parallel
{
Eigen::VectorXd tmp(D);
tmp.setZero();
#pragma omp for schedule(static)
for (int i = 0; i < N; i++) {
for (int d = 0; d < D; d++) {
tmp(d) += square(X(d, i) - mean(d));
}
}
#pragma omp critical
{
var += tmp;
}
}
var /= N;
}
inline void writeToCSVfile(std::string filename, Eigen::MatrixXd matrix) {
const static Eigen::IOFormat csvFormat(6, Eigen::DontAlignCols, ",", "\n");
std::ofstream file(filename.c_str());
file << matrix.format(csvFormat);
}
// Format a double with n significant digits (default 6) using stream
// formatting, mirroring std::ostream's default representation.
inline std::string to_string_with_precision(const double a_value, const int n = 6)
{
    std::ostringstream stream;
    stream << std::setprecision(n) << a_value;
    return stream.str();
}
// Area under the ROC curve for scores 'pred' against labels 'test'.
// Assumes test holds 0/1 labels — TODO confirm at call sites; NP/NN below
// only make sense for binary labels. Returns NAN for empty input.
inline double auc(Eigen::VectorXd & pred, Eigen::VectorXd & test)
{
Eigen::VectorXd stack_x(pred.size());
Eigen::VectorXd stack_y(pred.size());
double auc = 0.0;
if (pred.size() == 0) {
return NAN;
}
// sort indices by ascending prediction score
std::vector<unsigned int> permutation( pred.size() );
for(unsigned int i = 0; i < pred.size(); i++) {
permutation[i] = i;
}
std::sort(permutation.begin(), permutation.end(), [&pred](unsigned int a, unsigned int b) { return pred[a] < pred[b];});
double NP = test.sum(); // number of positives
double NN = test.size() - NP; // number of negatives
//Build stack_x and stack_y
// stack_x = cumulative positives, stack_y = cumulative negatives,
// in score order
stack_x[0] = test[permutation[0]];
stack_y[0] = 1-stack_x[0];
for(int i=1; i < pred.size(); i++) {
stack_x[i] = stack_x[i-1] + test[permutation[i]];
stack_y[i] = stack_y[i-1] + 1 - test[permutation[i]];
}
// rectangle-rule integration of the ROC curve, normalized by NP*NN
for(int i=0; i < pred.size() - 1; i++) {
auc += (stack_x(i+1) - stack_x(i)) * stack_y(i+1); //TODO:Make it Eigen
}
return auc / (NP*NN);
}
|
GB_unop__minv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_bool_bool
// op(A') function: GB_unop_tran__minv_bool_bool
// C type: bool
// A type: bool
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = true ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (Ax [p]) for all p. The boolean multiplicative inverse is
// identically true (1/x == 1 for x in {0,1} with x != 0 required), which is
// why the loop body ignores Ax and stores the constant true.
GrB_Info GB_unop_apply__minv_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = true ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A and apply the boolean MINV operator, using
// the GB_unop_transpose.c template driven by the GB_CAST_OP macro above.
GrB_Info GB_unop_tran__minv_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
declare-simd-fix.h | #ifndef LLVM_CLANG_TEST_OPENMP_INPUTS_DECLARE_SIMD_FIX_H
#define LLVM_CLANG_TEST_OPENMP_INPUTS_DECLARE_SIMD_FIX_H
#pragma omp declare simd
float foo(float a, float b, int c);
float bar(float a, float b, int c);
#endif
|
kd_tree_index.h | #ifndef panene_base_index_h
#define panene_base_index_h
#include <vector>
#include <algorithm>
#include <random>
#include <util/allocator.h>
#include <util/heap.h>
#include <util/dynamic_bitset.h>
#include <util/random.h>
#include <util/result_set.h>
#include <dist.h>
#include <map>
#include <kd_tree.h>
#include <roaring/roaring.hh>
#define USE_KDTREE_INDEX_SYMBOLS public: \
typedef typename KDTreeIndex<DataSource>::Distance Distance;\
typedef typename KDTreeIndex<DataSource>::IDType IDType;\
typedef typename KDTreeIndex<DataSource>::DistanceType DistanceType;\
typedef typename KDTreeIndex<DataSource>::ElementType ElementType;\
typedef typename KDTreeIndex<DataSource>::Node Node;\
typedef typename KDTreeIndex<DataSource>::NodePtr NodePtr;\
typedef typename KDTreeIndex<DataSource>::BranchSt BranchSt;\
typedef typename KDTreeIndex<DataSource>::Branch Branch;\
typedef typename KDTreeIndex<DataSource>::NodeSplit NodeSplit;\
using KDTreeIndex<DataSource>::size;\
using KDTreeIndex<DataSource>::dataSource;\
using KDTreeIndex<DataSource>::numTrees;\
using KDTreeIndex<DataSource>::trees;\
using KDTreeIndex<DataSource>::pool;\
using KDTreeIndex<DataSource>::dim;\
using KDTreeIndex<DataSource>::meanSplit;\
using KDTreeIndex<DataSource>::divideTree;\
using KDTreeIndex<DataSource>::findNeighbors;
namespace panene
{
// Construction-time parameters for KDTreeIndex.
struct IndexParams {
int trees; // number of randomized kd-trees to build
size_t randomDimNum; // pick the split dimension at random among this many top-variance dimensions
IndexParams(int trees_ = 4, size_t randomDimNum_ = 5) : trees(trees_), randomDimNum(randomDimNum_) {}
};
// Per-query search parameters for KDTreeIndex::findNeighbors.
struct SearchParams {
int checks; // 32
float eps; // 0
int sorted;
int cores; // OpenMP thread count for batched knnSearch (0 = default)
Roaring *mask = nullptr; // ids present in the mask are excluded from results
SearchParams(int checks_ = 32, float eps_ = 0, int sorted_ = 0, int cores_ = 0) : checks(checks_), eps(eps_), sorted(sorted_), cores(cores_) {}
};
// Randomized kd-tree forest for (progressive) approximate nearest-neighbor
// search over a DataSource. Nodes are placement-allocated from 'pool';
// each tree also tracks per-leaf access frequency/depth for cost estimates.
template <typename DataSource>
class KDTreeIndex
{
public:
typedef typename DataSource::Distance Distance;
typedef typename DataSource::IDType IDType;
typedef typename DataSource::ElementType ElementType;
typedef typename DataSource::DistanceType DistanceType;
public:
// One kd-tree node: interior nodes split on (divfeat, divval); leaves
// carry a single point id.
struct Node {
int divfeat;
DistanceType divval;
IDType id; // point id for a leaf node, 0 otherwise
KDTree<struct Node *> *tree;
Node *child1, *child2;
Node(KDTree<struct Node *> *tree_) {
child1 = child2 = nullptr;
tree = tree_;
}
// Explicit destructor calls only: nodes live in the PooledAllocator,
// so no operator delete is invoked here; the pool reclaims the memory.
~Node() {
if (child1 != nullptr) { child1->~Node(); child1 = nullptr; }
if (child2 != nullptr) { child2->~Node(); child2 = nullptr; }
}
};
// Work item describing a subtree still to be split (node + id range).
struct NodeSplit {
struct Node *node;
IDType *begin;
int count;
int depth;
NodeSplit(Node* node_, IDType *begin_, int count_, int depth_) : node(node_), begin(begin_), count(count_), depth(depth_) {}
};
// Entry of the best-bin-first priority heap used during search.
template <typename T, typename DistanceType>
struct BranchStruct
{
T node; /* Tree node at which search resumes */
DistanceType mindist; /* Minimum distance to query for all nodes below. */
BranchStruct() {}
BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {}
bool operator<(const BranchStruct<T, DistanceType>& rhs) const
{
return mindist < rhs.mindist;
}
};
typedef Node* NodePtr;
typedef BranchStruct<NodePtr, DistanceType> BranchSt;
typedef BranchSt* Branch;
public:
// Creates the forest shell; trees are allocated here but their roots are
// built lazily via buildIndex()/addPoints().
KDTreeIndex(DataSource *dataSource_, IndexParams indexParams_, Distance distance_ = Distance())
: dataSource(dataSource_), indexParams(indexParams_), distance(distance_) {
numTrees = indexParams_.trees;
trees.resize(numTrees);
dim = dataSource->dim();
for (size_t i = 0; i < numTrees; ++i) {
trees[i] = new KDTree<NodePtr>(dataSource->capacity());
}
}
virtual ~KDTreeIndex() {
for (size_t i = 0; i < numTrees; ++i) {
delete trees[i];
}
}
// Lazily mark a point as removed; removed ids are skipped during search.
// NOTE(review): removedPoints is sized from dataSource->size() only on the
// FIRST removal — ids added after that point would index out of range;
// confirm intended usage.
void removePoint(size_t id) {
if (!removed) {
removedPoints.resize(dataSource->size());
removedPoints.reset();
removed = true;
}
if (!removedPoints.test(id)) {
removedPoints.set(id);
removedCount++;
}
}
size_t getSize() { return size; }
int usedMemory() const {
return int(pool.usedMemory + pool.wastedMemory + size * sizeof(int)); // pool memory and vind array memory
}
/**
* Find set of nearest neighbors to vec. Their ids are stored inside
* the result object.
*
* Params:
* result = the result object in which the ids of the nearest-neighbors are stored
* vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/
float findNeighbors(const std::vector<ElementType> &vec, ResultSet<IDType, DistanceType> &result, const SearchParams& searchParams) const
{
int maxChecks = searchParams.checks;
float epsError = 1 + searchParams.eps;
Roaring *mask = searchParams.mask;
float costSum;
if (removed) {
costSum = getNeighbors<true>(vec, result, maxChecks, epsError, mask);
}
else {
costSum = getNeighbors<false>(vec, result, maxChecks, epsError, mask);
}
// TODO
// distances are accumulated squared; convert to Euclidean for the caller
for (size_t i = 0; i < result.k; ++i)
result.distances[i] = sqrt(result.distances[i]);
return costSum;
}
/**
* Performs the approximate nearest-neighbor search. The search is approximate
* because the tree traversal is abandoned after a given number of descends in
* the tree.
*/
template<bool with_removed>
float getNeighbors(const std::vector<ElementType> &vec, ResultSet<IDType, DistanceType> &result, int maxCheck, float epsError, Roaring *mask) const
{
BranchSt branch;
int checkCount = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size);
DynamicBitset checked(size);
float costSum = 0;
size_t visitedLeaves = 0;
float cost;
/* Search once through each tree down to root. */
for (size_t i = 0; i < numTrees; ++i) {
cost = searchLevel<with_removed>(vec, result, trees[i]->root, 0, checkCount, maxCheck, epsError, heap, checked, mask);
if (cost > 0) {
costSum += cost;
visitedLeaves++;
}
}
/* Keep searching other branches from heap until finished. */
while (heap->popMin(branch) && (checkCount < maxCheck || !result.full())) {
cost = searchLevel<with_removed>(vec, result, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked, mask);
if (cost > 0) {
costSum += cost;
visitedLeaves++;
}
}
delete heap;
// subtract the expected log-depth cost per visited leaf; never negative
return std::max((float)(costSum - (float)visitedLeaves * std::log2(size)), 0.0f);
}
/**
* Search starting from a given node of the tree. Based on any mismatches at
* higher levels, all exemplars below this level must have a distance of
* at least "mindistsq".
*/
template<bool with_removed>
float searchLevel(const std::vector<ElementType> &vec, ResultSet<IDType, DistanceType> &result_set, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
float epsError, Heap<BranchSt>* heap, DynamicBitset& checked, Roaring *mask) const
{
if (result_set.worstDist < mindist) {
// printf("Ignoring branch, too far\n");
return 0;
}
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL) && (node->child2 == NULL)) {
IDType id = node->id;
float cost = node->tree->incrementFreqByOne(id);
// std::cerr << node->tree->insertionLog[node->id].depth << std::endl;
if (with_removed && removedPoints.test(id)) return cost;
if (mask != nullptr && mask->contains(id)) return cost;
/* Do not check same node more than once when searching multiple numTrees. */
if (checked.test(id) || ((checkCount >= maxCheck) && result_set.full())) return cost;
checked.set(id);
checkCount++;
DistanceType dist = dataSource->getSquaredDistance(id, vec);
result_set << Neighbor<IDType, DistanceType>(id, dist);
return cost;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat]; //dataSource->get(qid, node -> divfeat);
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance.accum_dist(val, node->divval, node->divfeat);
// if (2 * checkCount < maxCheck || !result.full()) {
if ((new_distsq*epsError < result_set.worstDist) || !result_set.full()) {
heap->insert(BranchSt(otherChild, new_distsq));
}
/* Call recursively to search next level down. */
return searchLevel<with_removed>(vec, result_set, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked, mask);
}
// Recursively build the subtree for ids[0..count); pool-allocates nodes.
NodePtr divideTree(KDTree<NodePtr>* tree, IDType *ids, size_t count, size_t depth) {
NodePtr node = new(pool) Node(tree);
/* If too few exemplars remain, then make this a leaf node. */
if (count == 1) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->divfeat = -1; // a leaf node
IDType id = ids[0];
node->id = id;
tree->setInsertionLog(id, 1, depth); // when we build a tree, set the frequency of all points to 1
}
else {
int idx;
int cutfeat;
DistanceType cutval;
meanSplit(ids, count, idx, cutfeat, cutval);
node->divfeat = cutfeat;
node->divval = cutval;
node->child1 = divideTree(tree, ids, idx, depth + 1);
node->child2 = divideTree(tree, ids + idx, count - idx, depth + 1);
}
return node;
}
/**
* Choose which feature to use in order to subdivide this set of vectors.
* Make a random choice among those with the highest variance, and use
* its variance as the threshold value.
**/
void meanSplit(IDType *ids, int count, int &index, int &cutfeat, DistanceType &cutval) {
/* Compute mean values. Only the first SAMPLE_MEAN values need to be
sampled to get a good estimate.
*/
int sampleCount = (std::min)((int)SAMPLE_MEAN + 1, count);
std::vector<DistanceType> mean(dim), var(dim);
dataSource->computeMeanAndVar(ids, sampleCount, mean, var);
/* Select one of the highest variance ids at random. */
cutfeat = selectDivision(var);
cutval = mean[cutfeat];
int lim1, lim2;
planeSplit(ids, count, cutfeat, cutval, lim1, lim2);
if (lim1 > count / 2) index = lim1;
else if (lim2 < count / 2) index = lim2;
else index = count / 2;
/* If either list is empty, it means that all remaining features
* are identical. Split in the middle to maintain a balanced tree.
*/
if ((lim1 == count) || (lim2 == 0)) index = count / 2;
}
/**
* Select the top RAND_DIM largest values from v and return the index of
* one of these selected at random.
*/
// NOTE(review): 'num' (int) is compared against indexParams.randomDimNum
// (size_t) — signed/unsigned comparison; harmless for small values but
// worth normalizing the types.
int selectDivision(const std::vector<DistanceType> &v)
{
int num = 0;
std::vector<size_t> topind(indexParams.randomDimNum);
/* Create a list of the ids of the top RAND_DIM values. */
for (size_t i = 0; i < dim; ++i) {
if ((num < indexParams.randomDimNum) || (v[i] > v[topind[num - 1]])) {
/* Put this element at end of topind. */
if (num < indexParams.randomDimNum) {
topind[num++] = i; /* Add to list. */
}
else {
topind[num - 1] = i; /* Replace last element. */
}
/* Bubble end value down to right location by repeated swapping. */
int j = num - 1;
while (j > 0 && v[topind[j]] > v[topind[j - 1]]) {
std::swap(topind[j], topind[j - 1]);
--j;
}
}
}
/* Select a random integer in range [0,num-1], and return that index. */
int rnd = rand_int(num);
return (int)topind[rnd];
}
/**
* Subdivide the list of points by a plane perpendicular on axe corresponding
* to the 'cutfeat' dimension at 'cutval' position.
*
* On return:
* dataset[ind[0..lim1-1]][cutfeat]<cutval
* dataset[ind[lim1..lim2-1]][cutfeat]==cutval
* dataset[ind[lim2..count]][cutfeat]>cutval
*/
void planeSplit(IDType *ids, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
{
/* Move vector ids for left subtree to front of list. */
int left = 0;
int right = count - 1;
for (;; ) {
while (left <= right && dataSource->get(ids[left], cutfeat) < cutval) ++left; // TODO
while (left <= right && dataSource->get(ids[right], cutfeat) >= cutval) --right; // TODO
if (left > right) break;
std::swap(ids[left], ids[right]); ++left; --right;
}
lim1 = left;
right = count - 1;
for (;; ) {
while (left <= right && dataSource->get(ids[left], cutfeat) <= cutval) ++left; // TODO
while (left <= right && dataSource->get(ids[right], cutfeat) > cutval) --right; // TODO
if (left > right) break;
std::swap(ids[left], ids[right]); ++left; --right;
}
lim2 = left;
}
// Grow the index by up to newPoints points from the data source. Rebuilds
// the whole forest once the index has doubled since the last build;
// otherwise inserts points into the existing trees one by one.
// Returns the number of points actually added.
size_t addPoints(size_t newPoints) {
size_t oldSize = size;
size += newPoints;
if (size > dataSource->size())
size = dataSource->size();
if (sizeAtBuild * 2 < size) {
buildIndex();
}
else {
for (size_t i = oldSize; i < size; ++i) {
for (size_t j = 0; j < numTrees; ++j) {
addPointToTree(trees[j], trees[j]->root, i, 0);
}
}
}
return size - oldSize;
}
// Not implemented. NOTE(review): a bare 'throw;' with no active exception
// calls std::terminate — presumably a placeholder; confirm.
size_t update(int ops) {
throw;
}
// Convenience overload: look up the stored vector for qid, then search.
void knnSearch(
const IDType &qid,
ResultSet<IDType, DistanceType> &resultSets,
size_t knn,
const SearchParams& params) const
{
std::vector<std::vector<ElementType>> vectors(1);
vectors[0].resize(dim);
dataSource->get(qid, vectors[0]);
knnSearch(vectors, resultSets, knn, params);
}
// Batched query: one findNeighbors per vector, parallelized with OpenMP.
void knnSearch(
const std::vector<std::vector<ElementType>> &vectors,
std::vector<ResultSet<IDType, DistanceType>> &resultSets,
size_t knn,
const SearchParams& params) const
{
#pragma omp parallel num_threads(params.cores)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int)vectors.size(); i++) {
findNeighbors(vectors[i], resultSets[i], params);
//ids_to_ids(ids[i], ids[i], n);
}
}
}
protected:
// Rebuild every tree from scratch over the current 'size' points, each
// tree over an independently shuffled id order.
// NOTE(review): std::random_shuffle was deprecated in C++14 and removed
// in C++17; std::shuffle with an engine from <random> (already included)
// is the replacement.
void buildIndex() {
sizeAtBuild = size;
std::vector<IDType> ids(size);
for (size_t i = 0; i < size; ++i) {
ids[i] = IDType(i);
}
trees.resize(numTrees);
freeIndex();
for (size_t i = 0; i < numTrees; ++i) {
std::random_shuffle(ids.begin(), ids.end());
trees[i] = new KDTree<NodePtr>(dataSource->capacity());
trees[i]->root = divideTree(trees[i], &ids[0], size, 1);
trees[i]->size = size;
trees[i]->cost = trees[i]->computeCost();
}
}
// NOTE(review): trees[i] was allocated with 'new' (ctor/buildIndex) but is
// destroyed here with an explicit destructor call, which does not release
// the KDTree object's own memory (pool.free() reclaims only Node storage)
// — looks like a leak, and it is inconsistent with 'delete trees[i]' in
// the destructor; confirm intended ownership.
void freeIndex() {
for (size_t i = 0; i < numTrees; ++i) {
if (trees[i] != nullptr) trees[i]->~KDTree();
}
pool.free();
}
// Insert point 'id' into an existing tree: descend to a leaf, then split
// that leaf into an interior node with two children (old point + new one)
// on the dimension with the widest span between them.
void addPointToTree(KDTree<NodePtr>* tree, NodePtr node, IDType id, int depth) {
if ((node->child1 == NULL) && (node->child2 == NULL)) {
size_t nodeId = node->id;
size_t divfeat = dataSource->findDimWithMaxSpan(id, node->id);
NodePtr left = new(pool) Node(tree);
left->child1 = left->child2 = NULL;
NodePtr right = new(pool) Node(tree);
right->child1 = right->child2 = NULL;
ElementType pointValue = dataSource->get(id, divfeat);
ElementType leafValue = dataSource->get(node->id, divfeat);
if (pointValue < leafValue) {
left->id = id;
right->id = node->id;
}
else {
left->id = node->id;
right->id = id;
}
left->divfeat = right->divfeat = -1;
node->divfeat = divfeat;
node->divval = (pointValue + leafValue) / 2;
node->child1 = left;
node->child2 = right;
// incrementally update imbalance
tree->setInsertionLog(id, 0, depth + 1);
tree->incrementFreqAndDepthByOne(nodeId);
}
else {
if (dataSource->get(id, node->divfeat) < node->divval) {
addPointToTree(tree, node->child1, id, depth + 1);
}
else {
addPointToTree(tree, node->child2, id, depth + 1);
}
}
}
public:
std::vector<float> getCachedImbalances() {
std::vector<float> imbalances;
for (size_t i = 0; i < numTrees; ++i) {
imbalances.push_back(trees[i]->getCachedImbalance());
}
return imbalances;
}
std::vector<float> recomputeImbalances() {
std::vector<float> imbalances;
for (size_t i = 0; i < numTrees; ++i) {
imbalances.push_back(trees[i]->computeImbalance());
}
return imbalances;
}
// Deepest leaf over all trees.
size_t computeMaxDepth() {
size_t maxDepth = 0;
for (size_t j = 0; j < numTrees; ++j) {
size_t depth = trees[j]->computeMaxDepth();
if (maxDepth < depth)
maxDepth = depth;
}
return maxDepth;
}
// Merge the per-tree count histograms into one map.
std::map<size_t, size_t> computeCountDistribution() {
std::map<size_t, size_t> dict;
for (size_t i = 0; i < numTrees; ++i) {
const auto& partial = trees[i]->computeCountDistribution();
for (const auto& tuple : partial) {
if (dict.count(tuple.first) == 0)
dict[tuple.first] = 0;
dict[tuple.first] += tuple.second;
}
}
return dict;
}
public:
enum
{
SAMPLE_MEAN = 100,
};
DataSource *dataSource;
IndexParams indexParams;
Distance distance;
size_t numTrees;
size_t size = 0; // the number of points loaded into trees
size_t dim;
bool removed = false; // true once any point has been removed
DynamicBitset removedPoints; // set bits = removed ids (lazy-allocated)
size_t removedCount = 0;
PooledAllocator pool; // arena backing all Node allocations
std::vector<KDTree<NodePtr>*> trees;
size_t sizeAtBuild = 0; // index size at the last full rebuild
};
}
#endif
|
GB_unop__identity_int8_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_fc32
// op(A') function: GB_unop_tran__identity_int8_fc32
// C type: int8_t
// A type: GxB_FC32_t
// cast: int8_t cij = GB_cast_to_int8_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int8_t) Ax [p] for all p: apply the IDENTITY operator while
// typecasting each single-complex entry to int8_t via its real part.
GrB_Info GB_unop_apply__identity_int8_fc32
(
int8_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose A, casting FC32 entries to int8_t, using
// the GB_unop_transpose.c template driven by the GB_CAST_OP macro above.
GrB_Info GB_unop_tran__identity_int8_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gi_topological_gradient_using_algorithms.h | /*
*
* Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#ifndef TOPOLOGICAL_GRADIENT_USING_ALGORITHMS_H
#define TOPOLOGICAL_GRADIENT_USING_ALGORITHMS_H
#include "gi_basic_types.h"
#include "gi_discrete_gradient_labeling.h"
//#include "gi_topological_explicit_mesh_function.h"
//#include "gi_topological_regular_grid.h"
//#include "gi_topological_regular_masked_restricted_grid.h"
#include <vector>
#include <queue>
#include <map>
#include <set>
namespace GInt {
template <class GridType, class MeshFunction, class LabelingType>
class TopologicalGradientUsingAlgorithms {
protected:
MeshFunction* my_mesh_function;
GridType* my_mesh_handler;
LabelingType* my_grad_field;
public:
// Wire up the mesh function, mesh handler, and gradient field this
// algorithm suite operates on; no ownership is taken of any of them.
TopologicalGradientUsingAlgorithms(
MeshFunction* mesh_function,
GridType* mesh_handler,
LabelingType* grad_field) :
my_mesh_function(mesh_function),
my_mesh_handler(mesh_handler),
my_grad_field(grad_field) {
}
// trace "down" in gradient and fill in the result std::vector
// with all cells that are found
//void CheckGradientForLoops() {
// DenseLabeling<char>* visited = new DenseLabeling<char>(my_mesh_handler->numCells());
// visited->SetAll(0);
// typename GridType::AllCellsIterator allit(my_mesh_handler);
// for (allit.begin(); allit.valid(); allit.advance()) {
// INDEX_TYPE cellid = allit.value();
void _recCheckForLoops(INDEX_TYPE id, std::set<INDEX_TYPE>& current_path, DenseLabeling<char>* visited) {
// this path down has been checked, so skip
if (visited->GetLabel(id) != 0) return;
if (my_grad_field->getCritical(id)) return;
if (my_mesh_handler->dimension(my_grad_field->getPair(id) < my_mesh_handler->dimension(id))) return;
current_path.insert(id);
INDEX_TYPE head = my_grad_field->getPair(id);
visited->SetLabel(id, 1);
visited->SetLabel(head, 1);
typename GridType::FacetsIterator fit(my_mesh_handler);
for (fit.begin(id); fit.valid(); fit.advance()) {
INDEX_TYPE fid = fit.value();
if (fid == id) continue; // skip going back to head
if (my_mesh_handler->dimension(my_grad_field->getPair(fid) < my_mesh_handler->dimension(fid))) continue; // stick to dimension
if (current_path.count(fid) != 0) {
printf("cycle detected");
}
_recCheckForLoops(fid, current_path, visited);
}
current_path.erase(id);
}
void CheckGradientForLoops() {
DenseLabeling<char>* visited = new DenseLabeling<char>(my_mesh_handler->numCells());
visited->SetAll(0);
// trace down
typename GridType::AllCellsIterator allit(my_mesh_handler);
for (allit.begin(); allit.valid(); allit.advance()) {
INDEX_TYPE cellid = allit.value();
if (visited->GetLabel(cellid) != 0) continue;
// skip critical points and heads of arrows, since we will search [tail->head->facets]*
if (my_grad_field->getCritical(cellid)) continue;
if (my_mesh_handler->dimension(my_grad_field->getPair(cellid) > my_mesh_handler->dimension(cellid))) continue;
// start a path down
std::set<INDEX_TYPE> path;
_recCheckForLoops(cellid, path, visited);
}
}
//}
void CheckGradientConsistency() {
typename GridType::AllCellsIterator allit(my_mesh_handler);
for (allit.begin(); allit.valid(); allit.advance()) {
INDEX_TYPE cellid = allit.value();
if (my_grad_field->getAssigned(cellid) == 0) {
printf("CheckGradientConsistency(): error: cellid %d is not assigned\n",cellid);
}
if (my_grad_field->getCritical(cellid)) {
}
else {
INDEX_TYPE pairid = my_grad_field->getPair(cellid);
if (my_grad_field->getCritical(pairid)) {
printf("CheckGradientConsistency(): error: cell %d is paired with critical cell %d\n", cellid, pairid);
}
else {
INDEX_TYPE pairpair = my_grad_field->getPair(pairid);
if (pairpair != cellid) {
printf("CheckGradientConsistency(): error: pair pair is not cellid (%d -> %d -> %d)\n", cellid, pairid, pairpair);
}
}
if (my_mesh_handler->dimension(pairid) != my_mesh_handler->dimension(cellid) - 1 &&
my_mesh_handler->dimension(pairid) != my_mesh_handler->dimension(cellid) + 1) {
printf("CheckGradientConsistency(): error: dimensions of cell (%d) and pair (%d) dont match\n",
my_mesh_handler->dimension(cellid), my_mesh_handler->dimension(pairid));
}
}
}
}
virtual void count_critical_points(int dim) {
int* counts = new int[dim];
for (int i = 0; i < dim; i++) counts[i] = 0;
for (INDEX_TYPE i = 0; i < my_mesh_handler->numCells(); i++) {
if (my_grad_field->getCritical(i))
counts[my_mesh_handler->dimension(i)]++;
}
for (int i = 0; i < dim; i++)
printf("index-%d=%d\n", i, counts[i]);
}
virtual void trace_down_cells(const INDEX_TYPE& cellid,
std::vector<INDEX_TYPE>& result) {
std::queue<INDEX_TYPE> cell_queue;
cell_queue.push(cellid);
result.clear();
std::set<INDEX_TYPE> cell_visited;
while (!cell_queue.empty()) {
INDEX_TYPE current = cell_queue.front();
cell_queue.pop();
cell_visited.insert(current);
result.push_back(current);
typename GridType::FacetsIterator fit(my_mesh_handler);
for (fit.begin(current); fit.valid(); fit.advance()) {
INDEX_TYPE temp_id = fit.value();
if (my_grad_field->getCritical(temp_id) &&
cell_visited.count(temp_id) == 0) {
result.push_back(temp_id);
cell_visited.insert(temp_id);
}
else if (cell_visited.count(temp_id) == 0) {
INDEX_TYPE pair = my_grad_field->getPair(temp_id);
result.push_back(temp_id);
result.push_back(pair);
cell_visited.insert(temp_id);
cell_visited.insert(pair);
cell_queue.push(pair);
}
}
}
}
virtual void trace_up_cells(const INDEX_TYPE& cellid,
std::vector<INDEX_TYPE>& result) const {
std::queue<INDEX_TYPE> cell_queue;
cell_queue.push(cellid);
result.clear();
std::set<INDEX_TYPE> cell_visited;
while (!cell_queue.empty()) {
INDEX_TYPE current = cell_queue.front();
cell_queue.pop();
cell_visited.insert(current);
result.push_back(current);
typename GridType::CofacetsIterator cofacets(my_mesh_handler);
for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) {
INDEX_TYPE temp_id = cofacets.value();
if (my_grad_field->getCritical(temp_id) &&
cell_visited.count(temp_id) == 0) {
result.push_back(temp_id);
cell_visited.insert(temp_id);
}
else if (cell_visited.count(temp_id) == 0) {
INDEX_TYPE pair = my_grad_field->getPair(temp_id);
result.push_back(temp_id);
result.push_back(pair);
cell_visited.insert(temp_id);
cell_visited.insert(pair);
cell_queue.push(pair);
}
}
}
}
virtual void trace_down_cells_restricted(const INDEX_TYPE& cellid,
std::vector<INDEX_TYPE>& result) {
std::queue<INDEX_TYPE> cell_queue;
cell_queue.push(cellid);
DIM_TYPE temp_dim = my_grad_field->getDimAscMan(cellid) + 1;
result.clear();
std::set<INDEX_TYPE> cell_visited;
while (!cell_queue.empty()) {
INDEX_TYPE current = cell_queue.front();
cell_queue.pop();
cell_visited.insert(current);
result.push_back(current);
typename GridType::FacetsIterator fit(my_mesh_handler);
for (fit.begin(current); fit.valid(); fit.advance()) {
INDEX_TYPE temp_id = fit.value();
if (my_grad_field->getCritical(temp_id) &&
cell_visited.count(temp_id) == 0) {
result.push_back(temp_id);
cell_visited.insert(temp_id);
}
else if (cell_visited.count(temp_id) == 0 &&
my_grad_field->getDimAscMan(temp_id) == temp_dim) {
INDEX_TYPE pair = my_grad_field->getPair(temp_id);
result.push_back(temp_id);
result.push_back(pair);
cell_visited.insert(temp_id);
cell_visited.insert(pair);
cell_queue.push(pair);
}
}
}
}
virtual void trace_down_cells_restricted_counting(const INDEX_TYPE& cellid,
std::vector<INDEX_TYPE>& result, std::vector<int>& counts) {
std::queue<INDEX_TYPE> cell_queue;
cell_queue.push(cellid);
DIM_TYPE temp_dim = my_grad_field->getDimAscMan(cellid) + 1;
result.clear();
counts.clear();
std::set<INDEX_TYPE> cell_visited;
// build the graph
std::map<INDEX_TYPE, std::set<INDEX_TYPE> > node_graph;
std::map<INDEX_TYPE, int > visit_counts;
while (!cell_queue.empty()) {
INDEX_TYPE current = cell_queue.front();
cell_queue.pop();
std::set<INDEX_TYPE> neighbors;
cell_visited.insert(current);
typename GridType::FacetsIterator fit(my_mesh_handler);
for (fit.begin(current); fit.valid(); fit.advance()) {
INDEX_TYPE temp_id = fit.value();
if (my_grad_field->getCritical(temp_id)) {
neighbors.insert(temp_id);
if (visit_counts.count(temp_id) == 0) {
visit_counts[temp_id] = 1;
}
else {
visit_counts[temp_id]++;
}
cell_visited.insert(temp_id);
}
else if (my_grad_field->getDimAscMan(temp_id) == temp_dim) {
INDEX_TYPE pair = my_grad_field->getPair(temp_id);
if (current == pair) continue;
neighbors.insert(pair);
if (visit_counts.count(pair) == 0) {
visit_counts[pair] = 1;
}
else {
visit_counts[pair]++;
}
if (cell_visited.count(pair) == 0) {
cell_queue.push(pair);
}
cell_visited.insert(temp_id);
cell_visited.insert(pair);
}
}
node_graph[current].insert(neighbors.begin(), neighbors.end());
}
//print graph
printf("\ngraph of %d:\n", cellid);
for (std::map<INDEX_TYPE, std::set<INDEX_TYPE> >::iterator mit = node_graph.begin();
mit != node_graph.end(); mit++) {
INDEX_TYPE tempid = (*mit).first;
printf(" n=%d\n", tempid);
for (std::set<INDEX_TYPE>::iterator sit = (*mit).second.begin();
sit != (*mit).second.end(); sit++)
printf(" -->%d\n", *sit);
}
// traverse graph from root
cell_queue.push(cellid);
while (!cell_queue.empty()) {
INDEX_TYPE current = cell_queue.front();
cell_queue.pop();
result.push_back(current);
counts.push_back(0);
for (std::set<INDEX_TYPE>::iterator it = node_graph[current].begin();
it != node_graph[current].end(); it++) {
INDEX_TYPE tempid = *it;
visit_counts[tempid]--;
if (visit_counts[tempid] == 0) {
cell_queue.push(tempid);
}
}
}
// the base case, 1 path from cell to itself
visit_counts[cellid] = 1;
for (int i = 0; i < result.size(); i++) {
INDEX_TYPE current = result[i];
int temp_count = visit_counts[current];
counts[i] = temp_count;
for (std::set<INDEX_TYPE>::iterator it = node_graph[current].begin();
it != node_graph[current].end(); it++) {
INDEX_TYPE tempid = *it;
visit_counts[tempid] += temp_count;
}
}
}
void rec_man_trace_up(INDEX_TYPE& cellid, std::set<INDEX_TYPE>& res) {
res.insert(cellid);
INDEX_TYPE current = cellid;
DIM_TYPE cdim = this->my_mesh_handler->dimension(cellid);
typename GridType::CofacetsIterator cofacets(my_mesh_handler);
for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) {
INDEX_TYPE temp_id = cofacets.value();
if (this->my_grad_field->getCritical(temp_id) || !my_grad_field->getAssigned(temp_id)) continue;
INDEX_TYPE temp_pair = my_grad_field->getPair(temp_id);
if (temp_pair == cellid) continue;
if (my_mesh_handler->dimension(temp_pair) != cdim) continue;
rec_man_trace_up(temp_pair, res);
}
}
protected:
void rec_man_trace_up_marking(INDEX_TYPE& cellid, DIM_TYPE value) {
if (my_grad_field->getDimAscMan(cellid) == value) return;
my_grad_field->setDimAscMan(cellid, value);
INDEX_TYPE current = cellid;
// DIM_TYPE cdim = this->my_mesh_handler->dimension(cellid);
typename GridType::CofacetsIterator cofacets(my_mesh_handler);
for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) {
INDEX_TYPE temp_id = cofacets.value();
if (this->my_grad_field->getCritical(temp_id) || !my_grad_field->getAssigned(temp_id)) continue;
INDEX_TYPE temp_pair = my_grad_field->getPair(temp_id);
if (temp_pair == cellid) continue;
//if (my_mesh_handler->dimension(temp_pair) != cdim) continue;
if (my_grad_field->getDimAscMan(temp_id) == value) continue;
my_grad_field->setDimAscMan(temp_id, value);
rec_man_trace_up_marking(temp_pair, value);
}
}
public:
void setAscendingManifoldDimensions() {
std::vector<INDEX_TYPE> criticals[4];
std::vector<INDEX_TYPE> topo_index_partition;
int num_threads;
#pragma omp parallel
{
#pragma omp single
{
num_threads = omp_get_num_threads();
ArrayIndexPartitioner::EvenChunkSplit(my_mesh_handler->numCells(), num_threads, topo_index_partition);
}
int thread_num = omp_get_thread_num();
typename GridType::AllCellsIterator all_cells_iterator(my_mesh_handler, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]);
for (all_cells_iterator.begin(); all_cells_iterator.valid(); all_cells_iterator.advance()) {
INDEX_TYPE cell_id = all_cells_iterator.value();
//my_grad_field->setMark(cell_id, 0);
my_grad_field->setDimAscMan(cell_id, 3);
if (my_grad_field->getCritical(cell_id)) {
DIM_TYPE tdim = my_mesh_handler->dimension(cell_id);
#pragma omp critical
{
criticals[tdim].push_back(cell_id);
}
}
}
}
// no now every cell is assigned to 3-manifold, and have list of critical points of each dimension
//printf("found %d %d %d %d crits\n", criticals[0].size(), criticals[1].size(), criticals[2].size(), criticals[3].size());
INDEX_TYPE num_1s = criticals[1].size();
#pragma omp parallel for schedule(dynamic)
for (INDEX_TYPE vid = 0; vid < num_1s; vid++) {
INDEX_TYPE cid = criticals[1][vid];
rec_man_trace_up_marking(cid, 2);
}
INDEX_TYPE num_2s = criticals[2].size();
#pragma omp parallel for schedule(dynamic)
for (INDEX_TYPE vid = 0; vid < num_2s; vid++) {
INDEX_TYPE cid = criticals[2][vid];
rec_man_trace_up_marking(cid, 1);
}
INDEX_TYPE num_3s = criticals[3].size();
#pragma omp parallel for schedule(static)
for (INDEX_TYPE vid = 0; vid < num_3s; vid++) {
INDEX_TYPE cid = criticals[3][vid];
my_grad_field->setDimAscMan(cid, 0);
}
}
};
}
#endif
|
exponential.h | #ifndef MATH_EXPONENTIAL_H
#define MATH_EXPONENTIAL_H
namespace math {
namespace exponential {
// Plane-wave phase factor: at(x) = exp(i * wavenumbers . x), with real
// wavenumbers.
struct Phase {
    arma::vec wavenumbers;

    explicit
    inline Phase(const arma::uword dim) :
        wavenumbers(arma::zeros<arma::vec>(dim)) {}

    explicit
    inline Phase(const arma::vec & wavenumbers) :
        wavenumbers(wavenumbers) {}

    // Evaluate exp(i k . x); throws when dimensions disagree.
    cx_double at(const arma::vec & position) const {
        if (position.n_elem != this->wavenumbers.n_elem) {
            throw Error(
                "Different dimension between the position and exponential term");
        }
        return std::exp(
            arma::sum((this->wavenumbers * cx_double{0.0, 1.0}) % position));
    }

    // Complex conjugate: conj(exp(i k . x)) = exp(-i k . x).
    // BUG FIX: the old body returned Phase(arma::conj(wavenumbers)), but
    // arma::conj on a REAL vector is a no-op; the conjugate phase must negate
    // the wavenumbers instead.
    inline
    Phase conj() const {
        return Phase(arma::vec(-this->wavenumbers));
    }

    // Product of phases adds the wavenumbers.
    Phase operator*(const Phase & phase) const {
        const arma::vec new_wavenumbers = this->wavenumbers + phase.wavenumbers;
        return Phase(new_wavenumbers);
    }

    Phase & operator=(const Phase &) = default;
};
// Single exponential term: at(x) = coef * exp(wavenumbers . x).
// T may be real or complex; for an oscillatory term the imaginary part of
// wavenumbers carries the phase (see phase()).
template<typename T>
struct Term {
    T coef;
    arma::Col<T> wavenumbers;

    // Evaluate the term at a position of matching dimension.
    template<typename U>
    std::common_type_t<T, U> at(const arma::Col<U> & position) const {
        if (position.n_elem != this->wavenumbers.n_elem) {
            throw Error(
                "Different dimension between the position and exponential term");
        }
        return coef * std::exp(arma::sum(this->wavenumbers % position));
    }

    explicit
    inline
    Term(const arma::uword dim, const T coef = T(0.0)) :
        coef(coef),
        wavenumbers(arma::zeros<arma::Col<T>>(dim)) {}

    inline
    Term(const T coef, const arma::Col<T> & wavenumbers) :
        coef(coef),
        wavenumbers(wavenumbers) {}

    inline
    arma::uword dim() const {
        return this->wavenumbers.n_elem;
    }

    // d/dx_index: multiplies the coefficient by the matching wavenumber.
    inline
    Term<T> derivative(const arma::uword index) const {
        return {this->wavenumbers(index) * this->coef, this->wavenumbers};
    }

    // Mixed derivative over the coordinates listed in "index".
    // BUG FIX: the old body returned {prod(wavenumbers % index) * index},
    // which dropped the coefficient and placed a vector in the coefficient
    // slot (it could not be instantiated).  Each listed derivative multiplies
    // the coefficient by the corresponding wavenumber; wavenumbers unchanged.
    // NOTE(review): assumes "index" lists coordinate indices (possibly
    // repeated), mirroring the single-index overload — confirm with callers.
    inline
    Term<T> derivative(const arma::uvec & index) const {
        const arma::Col<T> picked = this->wavenumbers(index);
        return {arma::prod(picked) * this->coef, this->wavenumbers};
    }

    // Phase part: the imaginary components of the wavenumbers.
    inline
    Phase phase() const {
        return Phase(arma::imag(this->wavenumbers));
    }

    // Product / quotient of terms: multiply / divide coefficients,
    // add / subtract wavenumbers.
    template<typename U>
    Term<std::common_type_t<T, U>> operator*(const Term<U> & term) const {
        return {this->coef * term.coef, this->wavenumbers + term.wavenumbers};
    }

    template<typename U>
    Term<std::common_type_t<T, U>> operator/(const Term<U> & term) const {
        return {this->coef / term.coef, this->wavenumbers - term.wavenumbers};
    }

    Term & operator=(const Term &) = default;
};
}
template<typename T>
struct Exponential {
arma::Col<T> coefs;
arma::Mat<T> wavenumbers;
explicit
inline Exponential(const arma::uword dim, const T coef = T(0.0)) :
coefs(arma::Col<T>{coef}),
wavenumbers(arma::zeros<arma::Mat<T>>(dim, 1)) {}
explicit
inline Exponential(const exponential::Term<T> & term) :
coefs(arma::Col<T>{term.coef}),
wavenumbers(arma::conv_to<arma::Mat<T>>::from(term.wavenumbers)) {}
explicit
inline Exponential(const arma::Col<T> & coefs, const arma::Mat<T> wavenumbers)
:
coefs(coefs),
wavenumbers(wavenumbers) {
if (coefs.n_elem != wavenumbers.n_cols) {
throw Error("Different number of terms between coefs and wavenumbers");
}
}
template<typename U>
std::common_type_t<T, U> at(const arma::Col<U> & position) const {
if (position.n_elem != wavenumbers.n_rows) {
throw Error("different dimension between position and exponential term");
}
auto wavenumbers_with_position =
arma::conv_to<arma::Mat<std::common_type_t<T, U>>>::from(
this->wavenumbers);
wavenumbers_with_position.each_col() %= position;
return arma::sum(
this->coefs % arma::exp(arma::sum(wavenumbers_with_position)).st()
);
}
inline
exponential::Term<T> term(arma::uword index) const {
if (index >= this->coefs.n_elem) {
throw Error("The specified exponential term does not exist");
}
return exponential::Term<T>(this->coefs(index),
this->wavenumbers.col(index));
}
inline
arma::uword dim() const {
return this->wavenumbers.n_rows;
}
inline
Exponential<T> derivative(const arma::uword index) const {
if (index >= this->dim()) {
throw Error("Derivative operator out of bound");
}
return Exponential<T>(this->coefs % this->wavenumbers.row(index).st(),
this->wavenumbers);
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator+(const Exponential<U> & B) const {
if (this->dim() != B.dim()) {
throw Error("Different dimension between added exponential terms");
}
const arma::Col<std::common_type_t<T, U>>
new_this_coefs = arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(
this->coefs);
const arma::Col<std::common_type_t<T, U>>
new_B_coefs = arma::conv_to<arma::Col<std::common_type_t<T, U>>>::from(
B.coefs);
const arma::Mat<std::common_type_t<T, U>>
new_this_wavenumbers = arma::conv_to<arma::Mat<std::common_type_t<T, U>>>::from(
this->wavenumbers);
const arma::Mat<std::common_type_t<T, U>>
new_B_wavenumbers = arma::conv_to<arma::Mat<std::common_type_t<T, U>>>::from(
B.wavenumbers);
return Exponential<std::common_type_t<T, U>>(
arma::join_cols(new_this_coefs, new_B_coefs),
arma::join_rows(new_this_wavenumbers, new_B_wavenumbers));
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator+(const exponential::Term<U> & B) const {
return *this + Exponential(B);
}
template<typename U>
Exponential<std::common_type_t<T, U>> operator+(const U B) const {
return *this + Exponential(this->dim(), std::common_type_t<T, U>(B));
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator*(const exponential::Term<U> & B) const {
if (this->dim() != B.dim()) {
throw Error("Different dimension between added exponential terms");
}
auto new_wavenumbers = arma::conv_to<arma::Mat<std::common_type_t<T, U>>>::from(
this->wavenumbers);
new_wavenumbers.each_col() += B.wavenumbers;
const arma::Col<std::common_type_t<T, U>>
new_coefs = this->coefs * B.coef;
return Exponential<std::common_type_t<T,U>>(new_coefs, new_wavenumbers);
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator*(const Exponential<U> & B) const {
Exponential <std::common_type_t<T, U>> result_0 = (*this) * B.term(0);
#pragma omp parallel for
for (arma::uword i = 1; i < B.coefs.n_elem; i++) {
result_0 = result_0 + (*this) * B.term(i);
}
return result_0;
}
template<typename U>
Exponential<std::common_type_t<T, U>> operator*(const U B) const {
return {this->coefs * B, this->wavenumbers};
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator-(const Exponential<U> & B) const {
return *this + B * (-1.0);
}
template<typename U>
Exponential<std::common_type_t<T, U>> operator-(const U B) const {
return *this + (-B);
}
template<typename U>
Exponential<std::common_type_t<T, U>> operator/(const U B) const {
return *(this) * (1.0 / B);
}
template<typename U>
Exponential<std::common_type_t<T, U>>
operator/(const exponential::Term<T> & B) const {
auto new_wavenumbers = arma::conv_to<arma::Mat<std::common_type_t<T, U>>>::from(
this->wavenumbers);
new_wavenumbers.each_col() -= B.wavenumbers;
const arma::Col<std::common_type_t<T, U>> new_coefs = this->coefs / B.coef;
return Exponential<std::common_type_t<T, U>>(new_coefs, new_wavenumbers);
}
Exponential & operator=(const Exponential &) = default;
};
}
#endif //MATH_EXPONENTIAL_H
|
net_Greduced_code.h | /* including file for different pagerank subroutines */
#ifndef __GREDUCED__
#define __GREDUCED__
#include <stdio.h>
#include <math.h>
#include "filesize.h"
#include "matrix_simple.h"
#include "network_class.h"
#include "read_ascii.h"
#include "utf_string.h"
#include "quicksort_page.h"
// global convergence threshold for the PageRank power iterations
double eps_pagerank=1e-13;
/********************************/
/*
copies the first part of in until the first '.' to out,
"in" and "out" maybe the same in which case "in" will be
effectively shortened
*/
void get_first_dot(char *out,const char *in){
  int k=0;
  while(in[k]!='\0' && in[k]!='.'){
    out[k]=in[k];
    k++;
  }
  out[k]='\0';
}
/********************************/
/*
copies the first part of in until the last '.' to out,
"in" and "out" maybe the same in which case "in" will be
effectively shortened
*/
void get_last_dot(char *out,const char *in){
  int n=0;
  while(in[n]!='\0'){
    out[n]=in[n];
    n++;
  }
  out[n]='\0';
  /* truncate at the last '.' (if any) */
  for(int k=n-1;k>=0;k--){
    if(out[k]=='.'){
      out[k]='\0';
      break;
    }
  }
}
/*****************************************/
/* computes projection on reduced space: copy the entries of "big"
   listed in "node" into "small" (resized to node.dim) */
inline void project_sub(dvec &small,dvec &big,ivec &node){
  const int count=node.dim;
  small.resize(count);
  for(int k=0;k<count;k++){
    small[k]=big[node[k]];
  }
}
/*************************************/
// Print matrix "a" as "row col value" triples (blank line between rows) to
// "filename" (stdout when NULL); when "node_file_names" is given, the UTF-8
// node names read from that file are appended to rows/cols that have one.
void print_mat(matrix& a,const char *filename=NULL,
const char *node_file_names=NULL){
int i,j,dimx,dimy,len=0,nlen,l;
char **node_names=NULL;
FILE *fp;
if(node_file_names!=NULL){
node_names=full_read_ascii_file(node_file_names,len);
// determine max node_name length
nlen=0;
for(i=0;i<len;i++){
l=strlen_utf(node_names[i]);
if(l>nlen) nlen=l;
}
nlen++;
}
if(filename==NULL){
fp=stdout;
}else{
fp=fopen(filename,"w");
}
dimx=a.x();
dimy=a.y();
for(i=0;i<dimy;i++){
for(j=0;j<dimx;j++){
fprintf(fp,"%5d\t %5d\t %24.16lg",i,j,a(i,j));
// if(node_file_names!=NULL && i<len && j<len){
// (len stays 0 when no name file was given, so this is safe)
if(i<len && j<len){
// fprintf(fp," %20s %20s",node_names[i],node_names[j]);
fprintf(fp,"\t");
fprintf_utf(fp,node_names[i],nlen);
fprintf(fp,"\t");
fprintf_utf(fp,node_names[j],nlen);
}
fprintf(fp,"\n");
}
fprintf(fp,"\n");
}
if(filename!=NULL) fclose(fp);
clear_file_buff(node_names);
}
/*************************************/
// Write matrix "a" in dense format to "<filename up to last dot>.mat":
// total element count, then dimy, then all values (one per line);
// reverse!=0 writes the rows in reverse order.
void print_mat_dens(matrix& a,const char *filename,int reverse=0){
int i,j,ii,dimx,dimy;
char buff[200];
FILE *fp;
get_last_dot(buff,filename);
strcat(buff,".mat");
fp=fopen(buff,"w");
// BUG FIX: fopen result was unchecked (NULL would crash in fprintf)
if(fp==NULL) error("Cannot open output file in print_mat_dens!");
dimx=a.x();
dimy=a.y();
fprintf(fp,"%d\n%d\n",dimx*dimy,dimy);
for(i=0;i<dimy;i++){
if(reverse) ii=dimy-1-i; else ii=i;
for(j=0;j<dimx;j++){
fprintf(fp,"%24.16lg\n",a(ii,j));
}
}
fclose(fp);
}
/*************************************/
// Read a dense matrix written by print_mat_dens() from "filename".
// File format: total element count, dimy, then the values row by row;
// reverse!=0 undoes the reversed row order.
void read_mat_dens(matrix& a,const char *filename,int reverse=0){
int ii,i,j,n,dimx,dimy;
FILE *fp;
fp=fopen(filename,"r");
if(fp==NULL) error("Input file in read_mat_dens not found!");
// BUG FIX: the header read was unchecked
if(fscanf(fp,"%d%d",&n,&dimy)!=2) error("header read error in read_mat_dens");
dimx=n/dimy;
dvec::set_size(0);
matrix b(dimy,dimx);
// BUG FIX: removed a stray fprintf() (copy-paste from print_mat_dens)
// that attempted to WRITE to a stream opened for reading
for(i=0;i<dimy;i++){
if(reverse) ii=dimy-1-i; else ii=i;
for(j=0;j<dimx;j++){
// BUG FIX: check each element read (was unchecked)
if(fscanf(fp,"%lf",&b(ii,j))!=1) error("data read error in read_mat_dens");
}
}
fclose(fp);
a=b;
}
/*************************************/
#define BUFF_READ_LEN 1001
// Read a matrix from "filename" into "a".
// ASCII mode (default): two passes over "i j value" triples — the first pass
// determines the matrix size, the second fills the entries.
// Binary mode (filename contains ".bin"): an int n followed by n rows of
// n doubles.
void read_mat(matrix& a,const char *filename){
FILE *fp;
printf("Matrix input file = \"%s\"\n",filename);
fflush(stdout);
fp=fopen(filename,"r");
if(fp==NULL) error("Input file in read_mat not found!");
dvec::set_size(0);
// use binary read mode if filename contains ".bin"
if(strstr(filename,".bin")==NULL){
printf("Using ascii mode for matrix read\n");
fflush(stdout);
// ascii file mode
char buff[BUFF_READ_LEN];
int ii,i,j,n,dimx,dimy;
double val;
// first reading to determine matrix size
dimx=0; dimy=0;
while(fgets(buff,BUFF_READ_LEN,fp)!=0){
// lines that do not parse as "i j value" are silently skipped
if(sscanf(buff,"%d%d%lf",&i,&j,&val)>=3){
if(i>dimy) dimy=i;
if(j>dimx) dimx=j;
}
}
fclose(fp);
// indices are 0-based, so the dimensions are max index + 1
dimx++; dimy++;
printf("matrix size: %d x %d\n",dimy,dimx);
fflush(stdout);
matrix b(dimy,dimx);
// second reading
fp=fopen(filename,"r");
if(fp==NULL) error("Input file in read_mat not found!");
n=0;
while(fgets(buff,BUFF_READ_LEN,fp)!=0){
if(sscanf(buff,"%d%d%lf",&i,&j,&val)>=3){
b(i,j)=val;
n++;
}
}
fclose(fp);
printf("read n = %d matrix elements: %d x %d - %d = %d\n",
n,dimy,dimx,n,dimy*dimx-n);
fflush(stdout);
a=b;
}else{
printf("Using binary mode for matrix read\n");
fflush(stdout);
// binary file mode: square matrix, size header then raw rows of doubles
int l,n,i;
l=fread(&n,sizeof(int),1,fp);
if(l!=1) error("binary file read error for matrix size");
printf("Binary file: matrix size = %d x %d\n",n,n);
fflush(stdout);
if(n<=0) error("invalid matrix size");
matrix b(n,n);
for(i=0;i<n;i++){
l=fread(&b[i][0],sizeof(double),n,fp);
if(l!=n) error("binary file read error for matrix elements");
}
fclose(fp);
a=b;
}
printf("\n");
}
/*****************************************/
/* calculation of \sum_i abs(a(i)-b(i))/(abs(a(i)+b(i)),
   skipping entries where both vectors vanish */
inline double diff_norm_rel(dvec &a,dvec &b){
  const int n=a.size();
  double total=0.0;
  for(int k=0;k<n;k++){
    const double denom=abs(a[k])+abs(b[k]);
    if(denom==0) continue;
    total+=abs(a[k]-b[k])/denom;
  }
  return total;
}
/*****************************************/
/* calculation of \sum_i a(i) = e^T a */
inline double sum_vector(dvec &a){
  const int n=a.size();
  double total=0.0;
  for(int k=0;k<n;k++){
    total+=a[k];
  }
  return total;
}
/*****************************************/
/* normalization of pagerank \sum_i a(i)=1 assuming a(i)>=0
return value = used 1-norm before normalization
*/
inline double pagerank_normalize(dvec &a){
  const double norm1=sum_vector(a);
  a/=norm1;
  return norm1;
}
/*****************************************/
/* small dense matrix-vector multiplication: out = a * in */
void mat_vec_mult(dvec &out,matrix& a,dvec &in){
  const int n=in.size();
  if(n!=out.size() || n!=a.x() || n!=a.y())
    error("wrong mat-vec-size in mat_vec_mult");
  for(int row=0;row<n;row++){
    double acc=0.0;
    for(int col=0;col<n;col++){
      acc+=a(row,col)*in[col];
    }
    out[row]=acc;
  }
}
/*****************************************/
/* calculation of pagerank with the power method
for small reduced google matrices
*/
// Power-method PageRank for a small dense (reduced) Google matrix GR.
// pagerank: in/out — start vector (normalized here) and final result.
// iprint: interval (iterations) between convergence tests / progress prints.
// Stops when the relative change drops below eps_pagerank, or when the
// decrease saturates (geometric factor qfak), or after max_iter iterations.
void calc_pagerank_small(dvec &pagerank,matrix& GR,int iprint){
double quality,quality_rel,q1,qfak,pnorm;
int i,max_iter;
if(iprint<=0) iprint=1;
max_iter=400;
printf("max_iter = %d\n",max_iter);
fflush(stdout);
// saturation factor; 0.15 matches the standard damping 1-alpha
qfak=1.0+0.15/2.0;
pnorm=pagerank_normalize(pagerank);
dvec a(pagerank);
quality_rel=1e40;
for(i=0;i<=max_iter;i++){
// iterate: pagerank = GR * (previous pagerank)
swap(a,pagerank);
mat_vec_mult(pagerank,GR,a);
if(i%iprint==0 || i==max_iter){
quality=diff_norm1(pagerank,a);
q1=quality_rel;
quality_rel=diff_norm_rel(pagerank,a);
// pnorm=pagerank_normalize(pagerank);
pnorm=sum_vector(pagerank);
printf("%5d %18.10lg %18.10lg %25.16lg\n",
i,quality,quality_rel,pnorm);
fflush(stdout);
if(quality_rel<eps_pagerank) break;
// stop when convergence stalls (relative change no longer shrinking)
if(quality_rel<1e-4){
if(quality_rel*qfak>q1) break;
}
}
}
printf("Convergence at i = %d.\n",i);
fflush(stdout);
}
/*****************************************/
/* calculation of pagerank with the power method */
// Power-method PageRank for the full network "net".
// pagerank: in/out — start vector (normalized here) and final result.
// delta_alpha = 1 - damping factor (controls convergence rate).
// iprint: interval between convergence tests / progress prints.
// trans_flag: 0 -> iterate G (right eigenvector), 1 -> iterate G^T (left).
void calc_pagerank_power(dvec &pagerank,network &net,
double delta_alpha,int iprint,int trans_flag=0){
double quality,quality_rel,q1,qfak,pnorm;
int i,max_iter;
if(iprint<=0) iprint=1;
// iteration budget ~ -log(eps)/delta_alpha (convergence rate of the method);
// 3E-7 guards against division by zero, the factor 2 adds safety margin
max_iter=(int)(-log(eps_pagerank)/(delta_alpha+3E-7));
max_iter*=2;
printf("max_iter = %d\n",max_iter);
fflush(stdout);
qfak=1.0+delta_alpha/2.0;
pnorm=pagerank_normalize(pagerank);
dvec a(pagerank);
quality_rel=1e40;
for(i=0;i<=max_iter;i++){
swap(a,pagerank);
if(trans_flag){
net.GTmult(delta_alpha,pagerank,a);
}else{
net.GGmult(delta_alpha,pagerank,a);
}
if(i%iprint==0 || i==max_iter){
quality=diff_norm1(pagerank,a);
q1=quality_rel;
quality_rel=diff_norm_rel(pagerank,a);
// pnorm=pagerank_normalize(pagerank);
pnorm=sum_vector(pagerank);
printf("%5d %18.10lg %18.10lg %25.16lg\n",
i,quality,quality_rel,pnorm);
fflush(stdout);
if(quality_rel<eps_pagerank) break;
// stop when the decrease saturates at the expected geometric rate
if(quality_rel<1e-4){
if(quality_rel*qfak>q1) break;
}
}
}
printf("Convergence at i = %d.\n",i);
fflush(stdout);
}
/*****************************************/
/* calculation of pagerank for "PG" with P=projector and
G=Google matrix, return value = corresponding eigenvalue,
with the power method
node = array of length len, P=projector on nodes different from node[i]
*/
// Power-method eigenvector of "P G" with P = projector onto the complement of
// the cells listed in "node" and G the Google matrix (G^T when trans_flag=1).
// After every multiplication the components on "node" are zeroed; their
// removed weight gives dlambda, so the return value is 1 - lambda of the
// leading eigenvalue of the projected operator.
double calc_pagerank_project(dvec &pagerank,network &net,
double delta_alpha,int iprint,
ivec &node,int trans_flag=0){
double quality,quality_rel,q1,qfak,pnorm,dlambda,dlambda_old;
int i,max_iter,l;
if(iprint<=0) iprint=1;
// iteration budget ~ -log(eps)/delta_alpha, doubled for safety
max_iter=(int)(-log(eps_pagerank)/(delta_alpha+3E-7));
max_iter*=2;
printf("max_iter = %d\n",max_iter);
fflush(stdout);
qfak=1.0+delta_alpha/2.0;
pnorm=pagerank_normalize(pagerank);
dvec a(pagerank);
quality_rel=1e40;
// initial projection: remove (and measure) the weight on the excluded nodes
dlambda=0;
for(l=0;l<node.dim;l++){
dlambda+=pagerank[node[l]];
pagerank[node[l]]=0;
}
dlambda_old=dlambda;
pnorm=pagerank_normalize(pagerank);
// for G^T the lost norm (not the removed entries) measures 1-lambda
if(trans_flag) dlambda=1.0-pnorm;
for(i=0;i<=max_iter;i++){
swap(a,pagerank);
if(trans_flag){
net.GTmult(delta_alpha,pagerank,a);
}else{
net.GGmult(delta_alpha,pagerank,a);
}
// pnorm=pagerank_normalize(pagerank);
// printf("--> %5d %25.16lg\n",i,pnorm);
// fflush(stdout);
// project out the excluded nodes and accumulate the removed weight
dlambda=0;
for(l=0;l<node.dim;l++){
dlambda+=pagerank[node[l]];
pagerank[node[l]]=0;
}
pnorm=pagerank_normalize(pagerank);
if(trans_flag) dlambda=1.0-pnorm;
if(i%iprint==0 || i==max_iter){
quality=diff_norm1(pagerank,a);
q1=quality_rel;
quality_rel=diff_norm_rel(pagerank,a);
// pnorm=pagerank_normalize(pagerank);
// pnorm=sum_vector(pagerank);
// critical section: this routine runs inside omp sections (compute_project)
#pragma omp critical(print)
{
printf("%5d %18.10lg %18.10lg %25.16lg %18.10lg %25.16lg\n",
i,quality,quality_rel,dlambda,abs(dlambda-dlambda_old),pnorm);
fflush(stdout);
}
dlambda_old=dlambda;
if(quality_rel<eps_pagerank) break;
if(quality_rel<1e-3){
if(quality_rel*qfak>q1) break;
}
}
}
#pragma omp critical(print)
{
printf("Convergence at i = %d with lambda = %25.16lg.\n",i,1.0-dlambda);
fflush(stdout);
}
return dlambda;
}
/**************************************/
// Sort the pagerank vector (descending) and print it to
// "pagerank<extra>_<net>_<delta_alpha>.dat".
// print_number<=0: print all entries; otherwise print the first print_number
// entries followed by a logarithmically thinned subset (about ten_number
// points per decade of rank).
void print_pagerank(dvec &pagerank,network &net,double delta_alpha,
const char *extra,int print_number=0,int ten_number=100){
int j,n;
char buff[300];
FILE *fp;
n=pagerank.size();
ivec permut(n);
init_permutation(permut.c,n);
quicksort_down(pagerank.c,0,n-1,permut.c);
// assure all elements of pagerank are positif
if(pagerank[permut[0]]<0) pagerank*=(-1.0);
// create file name (BUG FIX: snprintf instead of sprintf to bound buff)
snprintf(buff,sizeof(buff),"pagerank%s_%s_%lg.dat",extra,net.base_name.c,delta_alpha);
// writing of pagerank file
fp=fopen(buff,"w");
// BUG FIX: fopen result was unchecked
if(fp==NULL) error("Cannot open output file in print_pagerank!");
fprintf(fp,"# size = %10d\n",n);
if(print_number<=0){
for(j=0;j<n;j++){
fprintf(fp,"%6d %24.15lg %6d\n",j,pagerank[permut[j]],permut[j]);
}
}else{
// geometric thinning factor: ten_number samples per factor of 10 in rank
double ten_fak=exp(log(10.0)/(double)ten_number);
int jlast=0;
for(j=1;j<=n;j++){
if(j<=print_number || (double)j>ten_fak*(double)jlast || j==n){
jlast=j;
fprintf(fp,"%6d %24.15lg %6d\n",j-1,pagerank[permut[j-1]],permut[j-1]);
}
}
}
fclose(fp);
}
/**************************************/
/* print the subpagerank vector to a file
*/
// Print the restriction of the (sorted) pagerank vector to the cells in
// "node" to "subpagerank<extra>_<net>_<delta_alpha>.dat", together with the
// normalized value, original index, global rank (K index) and — when
// "node_file_names" is given — the UTF-8 node name.
void print_subpagerank(dvec &pagerank,network &net,double delta_alpha,
const char *extra,ivec &node,
const char *node_file_names=NULL){
double sum;
int i,n,len2=0,nlen,l;
char buff[300];
char **node_names=NULL;
FILE *fp;
if(node_file_names!=NULL){
node_names=full_read_ascii_file(node_file_names,len2);
// determine max node_name length
nlen=0;
for(i=0;i<len2;i++){
l=strlen_utf(node_names[i]);
if(l>nlen) nlen=l;
}
nlen++;
}
n=pagerank.size();
ivec permut(n),pinv(n);
init_permutation(permut.c,n);
quicksort_down(pagerank.c,0,n-1,permut.c);
// pinv = inverse permutation: pinv[cell] = global rank (K index)
for(i=0;i<n;i++) pinv[permut[i]]=i;
// assure all elements of pagerank are positif
if(pagerank[permut[0]]<0) pagerank*=(-1.0);
// total weight carried by the selected nodes (used for normalization)
sum=0.0;
for(i=0;i<node.dim;i++) sum+=pagerank[node[i]];
// projection of subpagerank
// dvec pg(node.dim);
// project_sub(pg,pagerank,node);
// create file name
sprintf(buff,"subpagerank%s_%s_%lg.dat",extra,net.base_name.c,delta_alpha);
// writing of pagerank file
fp=fopen(buff,"w");
fprintf(fp,"## size = %10d norm = %25.15lg\n",node.dim,sum);
fprintf(fp,"# reduced index, value, normalized value, original index, K index");
if(len2>0) fprintf(fp,", name");
fprintf(fp,"\n");
sum=1.0/sum;
for(i=0;i<node.dim;i++){
fprintf(fp,"%6d\t %24.15lg\t %24.15lg\t %8d\t %8d",i,pagerank[node[i]],
sum*pagerank[node[i]],node[i],pinv[node[i]]);
if(i<len2){
fprintf(fp,"\t");
fprintf_utf(fp,node_names[i],nlen);
}
fprintf(fp,"\n");
}
fclose(fp);
clear_file_buff(node_names);
}
/*****************************************/
/* also computes the usual PageRank since only 3 threads are used */
// Compute, in three parallel OpenMP sections, the right and left eigenvectors
// of the projected Google operator (projector excluding "node") plus the
// ordinary PageRank (projection on the empty node set).  The left vector is
// rescaled so that left^T right = 1.  Returns dlambda = 1 - lambda from the
// right-vector computation.
inline double compute_project(dvec &right,dvec &left,dvec &pg,
network &net,
double delta_alpha,ivec &node){
int iprint=10;
double sp,dlambda1,dlambda2,dlambda3;
ivec node0(0);
// uniform start vectors (normalized inside calc_pagerank_project)
right.put_value(1.0);
left.put_value(1.0);
pg.put_value(1.0);
#pragma omp parallel sections
{
#pragma omp section
dlambda2=calc_pagerank_project(left,net,delta_alpha,iprint,node,1);
#pragma omp section
dlambda1=calc_pagerank_project(right,net,delta_alpha,iprint,node);
#pragma omp section
dlambda3=calc_pagerank_project(pg,net,delta_alpha,iprint,node0);
}
// biorthogonal normalization: left^T right = 1
sp=1.0/scalar_product(left,right);
left*=sp;
sp=scalar_product(left,right);
#pragma omp critical(print)
{
// dlambda1 and dlambda2 should agree (right/left eigenvalues coincide)
printf("dlambda = %24.16lg diff = %lg\n",
dlambda1,abs(dlambda1-dlambda2));
printf("TEST: psi_left^T * psi_right = %26.16lg\n",sp);
fflush(stdout);
}
return dlambda1;
}
/*****************************************/
/* computes: v = (1/f) P * v = (1/f) right * (left^T v)
with f being some inverse factor */
inline void projectP(dvec &right,dvec &left,dvec &v,double f=1){
  const int n=v.size();
  const double scale=scalar_product(left,v)/f;
  v.test(right);
  for(int k=0;k<n;k++){
    v[k]=scale*right[k];
  }
}
/*****************************************/
/* computes: v = Q * v = (1 - P) * v = v - right * (left^T v) */
/* Complement projection: v <- (1 - P) v = v - right * (left^T v). */
inline void projectQ(dvec &right,dvec &left,dvec &v){
double overlap = scalar_product(left, v);
v.lam_diff(overlap, right);         // v -= overlap * right
}
/*****************************************/
inline void get_cnode(int n,ivec &cnode,ivec &node){
int i,j;
if(n<node.dim) error("n<len in get_cnode");
ivec f(n);
for(i=0;i<n;i++) f[i]=0;
for(i=0;i<node.dim;i++){
if(node[i]>=n || node[i]<0) error("node[i] out of range in get_cnode");
f[node[i]]=1;
}
j=0;
for(i=0;i<n;i++){
if(!f[i]){
cnode[j]=i;
j++;
}
}
// printf("j = %d cnode.dim = %d\n",j,cnode.dim);
// fflush(stdout);
if(j!=cnode.dim) error("wrong cnode size in get_cnode");
}
/*****************************************/
#define MAX_GR_SIMPLE 3000
/* Direct (dense) computation of the reduced Google matrix
   G_R = G_rr + G_rs * (1 - G_ss)^{-1} * G_sr
   by probing the full operator with unit vectors. Only feasible for small
   networks (n <= MAX_GR_SIMPLE); memory is O(n^2). */
void compute_GR_simple(matrix &G_R,network &net,double delta_alpha,
ivec &node){
int n=net.size;
if(n>MAX_GR_SIMPLE) error("Too large network for compute_GR_simple");
int nr=node.dim;
int ns=n-nr;
ivec cnode(ns);
get_cnode(n,cnode,node);
dvec::set_size(0);
matrix G_rr(nr,nr),G_rs(nr,ns),G_sr(ns,nr),G_ss(ns,ns);
dvec in(n,0.0),out(n);
int i,j;
// filling of G_rr and G_sr
// probe column i: set the unit vector, multiply, then reset the entry
for(i=0;i<nr;i++){
in[node[i]]=1;
net.GGmult(delta_alpha,out,in);
in[node[i]]=0;
for(j=0;j<nr;j++) G_rr(j,i)=out[node[j]];
for(j=0;j<ns;j++) G_sr(j,i)=out[cnode[j]];
}
// filling of G_rs and G_ss
// note: G_ss is stored as (1 - G_ss), built from -out plus the +1.0 below
for(i=0;i<ns;i++){
in[cnode[i]]=1;
net.GGmult(delta_alpha,out,in);
in[cnode[i]]=0;
for(j=0;j<nr;j++) G_rs(j,i)=out[node[j]];
for(j=0;j<ns;j++) G_ss(j,i)=-out[cnode[j]];
}
for(j=0;j<ns;j++) G_ss(j,j)+=1.0;
// G_sr <- (1 - G_ss)^{-1} G_sr, then assemble G_R
G_sr/=G_ss;
G_R=G_rs*G_sr;
G_R+=G_rr;
}
/*****************************************/
#ifndef ITER_MODE
// implementation with power series mode:
// s=(\sum_{n=0}^\infty g^n)]v
// using: s_0=v, f_0=v: and f_{n+1}=g f_n, s_{n+1}=s_n+f_{n+1}
// with g= \bar G_{ss}
/* Computes the reduced Google matrix G_R and its components for the node
   subset 'node', using the power-series expansion described above:
   s = (sum_{n>=0} g^n) v with g = \bar G_{ss}.
   Outputs: G_rr (direct part), G_pr (projector part), G_qr (indirect part),
   G_I (their matrix sum applied through G), G_R = G_rr + G_I contribution.
   psiL/psiR receive the left/right leading eigenvectors of G_ss and pg the
   full PageRank, via compute_project().
   Fix: removed the unused local 'ns' (n - nr was never referenced). */
void compute_GR(matrix &G_R,matrix& G_rr,matrix& G_pr,matrix& G_qr,
matrix& G_I,dvec& psiL,dvec& psiR,dvec& pg,
network &net,double delta_alpha,ivec node){
int n=net.size;
int nr=node.dim;
if(G_R.x()!=nr || G_R.y()!=nr)
error("Wrong matrix size of G_R in comput_GR");
if(G_rr.x()!=nr || G_rr.y()!=nr)
error("Wrong matrix size of G_rr in comput_GR");
if(G_pr.x()!=nr || G_pr.y()!=nr)
error("Wrong matrix size of G_pr in comput_GR");
if(G_qr.x()!=nr || G_qr.y()!=nr)
error("Wrong matrix size of G_qr in comput_GR");
if(G_I.x()!=nr || G_I.y()!=nr)
error("Wrong matrix size of G_I in comput_GR");
double dlambda;
int j,l;
double quality;
int i,max_iter;
// iteration budget derived from the PageRank convergence tolerance
max_iter=(int)(-log(eps_pagerank)/(delta_alpha+3E-7));
max_iter*=2;
printf("Computation of left and right eigenvectors of G_ss\n");
fflush(stdout);
dlambda=compute_project(psiR,psiL,pg,net,delta_alpha,node);
dvec in(n),out(n),s(n),t(n),f(n),f2(n);
// note that the last line also fixes the default size of dvec to n
// which is important in the private declaration below which implicitely
// calls the default constructor of dvec for each thread
#pragma omp parallel for schedule(dynamic) private(in,out,s,t,f,f2,j,l,quality)
for(i=0;i<nr;i++){
// probe column i of the operator with a unit vector
in.put_value(0.0);
in[node[i]]=1;
net.GGmult(delta_alpha,out,in);
in[node[i]]=0;
for(j=0;j<nr;j++){
G_R(j,i)=out[node[j]];
G_rr(j,i)=out[node[j]];
out[node[j]]=0;
}
// split the remainder into projector (P) and complement (Q) parts
s=out;
projectP(psiR,psiL,out,dlambda);
projectQ(psiR,psiL,s);
f=s;
// power series: f_{n+1} = g f_n, s_{n+1} = s_n + f_{n+1}
for(l=0;l<max_iter;l++){
t=s;
net.GGmult(delta_alpha,f2,f,0);
swap(f,f2);
for(j=0;j<nr;j++) f[node[j]]=0;
projectQ(psiR,psiL,f);
s+=f;
quality=diff_norm1(t,s);
#pragma omp critical(print)
{
if(l%10==0){
printf("%5d %5d %18.10lg %18.10lg\n",i,l,quality,norm1(f));
fflush(stdout);
}
}
// if(quality<eps_pagerank) break;
// stop only once the series has fully saturated (no change at all)
if(quality<=0) break;
}
#pragma omp critical(print)
{
printf("%5d ",i);
printf("Convergence: %5d %5d %18.10lg %18.10lg\n",
i,l,quality,norm1(f));
fflush(stdout);
}
// propagate the P-part and Q-part once more through G and project onto
// the reduced node set to obtain the matrix columns
net.GGmult(delta_alpha,f,out,0);
for(j=0;j<nr;j++){
G_pr(j,i)=f[node[j]];
}
net.GGmult(delta_alpha,f,s,0);
for(j=0;j<nr;j++){
G_qr(j,i)=f[node[j]];
}
out+=s;
net.GGmult(delta_alpha,f,out,0);
for(j=0;j<nr;j++){
G_I(j,i)=f[node[j]];
G_R(j,i)+=f[node[j]];
}
}
}
/*****************************************/
#else
// implementation with a modified iteration mode:
// s=(\sum_{n=0}^\infty g^n)]v
// using: s_0=v, s_{n+1}=v+g*s_n
// with g= \bar G_{ss}
/* Iteration-mode variant of compute_GR (selected by ITER_MODE): evaluates
   s = (sum_{n>=0} g^n) v via the fixed point s_{n+1} = v + g s_n with
   g = \bar G_{ss}, instead of the explicit power series.
   Outputs and side effects are as in the power-series version.
   Fix: removed the unused local 'ns' (n - nr was never referenced). */
void compute_GR(matrix &G_R,matrix& G_rr,matrix& G_pr,matrix& G_qr,
matrix& G_I,dvec& psiL,dvec& psiR,dvec& pg,
network &net,double delta_alpha,ivec node){
int n=net.size;
int nr=node.dim;
if(G_R.x()!=nr || G_R.y()!=nr)
error("Wrong matrix size of G_R in comput_GR");
if(G_rr.x()!=nr || G_rr.y()!=nr)
error("Wrong matrix size of G_rr in comput_GR");
if(G_pr.x()!=nr || G_pr.y()!=nr)
error("Wrong matrix size of G_pr in comput_GR");
if(G_qr.x()!=nr || G_qr.y()!=nr)
error("Wrong matrix size of G_qr in comput_GR");
if(G_I.x()!=nr || G_I.y()!=nr)
error("Wrong matrix size of G_I in comput_GR");
double dlambda;
int j,l;
double quality,old_quality,fak;
// fak=1-delta_alpha/2;
// fak=0.99;
// 'fak' guards the stopping test against premature convergence plateaus
fak=1-delta_alpha/10;
printf("### Convergence: FAK = %lg\n",fak);
fflush(stdout);
int i,max_iter;
// max_iter=(int)(-log(eps_pagerank)/(delta_alpha+3E-7));
// max_iter*=2;
// budget for ~18 decimal digits of convergence of the fixed point
max_iter=(int)(-18*log(10.0)/(log(1-delta_alpha)+1e-7));
printf("Using modified iteration mode with max_iter = %d\n",max_iter);
printf("Computation of left and right eigenvectors of G_ss\n");
fflush(stdout);
dlambda=compute_project(psiR,psiL,pg,net,delta_alpha,node);
dvec in(n),out(n),s(n),t(n),v(n),s2(n);
// note that the last line also fixes the default size of dvec to n
// which is important in the private declaration below which implicitely
// calls the default constructor of dvec for each thread
#pragma omp parallel for schedule(dynamic) private(in,out,s,t,v,s2,j,l,quality,old_quality)
for(i=0;i<nr;i++){
// probe column i of the operator with a unit vector
in.put_value(0.0);
in[node[i]]=1;
net.GGmult(delta_alpha,out,in);
in[node[i]]=0;
for(j=0;j<nr;j++){
G_R(j,i)=out[node[j]];
G_rr(j,i)=out[node[j]];
out[node[j]]=0;
}
// split the remainder into projector (P) and complement (Q) parts
s=out;
projectP(psiR,psiL,out,dlambda);
projectQ(psiR,psiL,s);
v=s;
quality=1e100;
// fixed-point iteration: s <- v + g s
for(l=0;l<max_iter;l++){
t=s;
net.GGmult(delta_alpha,s2,s,0);
for(j=0;j<nr;j++) s2[node[j]]=0;
projectQ(psiR,psiL,s2);
swap(s,s2);
s+=v;
old_quality=quality;
quality=diff_norm1(t,s);
#pragma omp critical(print)
{
if(l%10==0){
printf("%5d %5d %18.10lg %18.10lg\n",i,l,quality,old_quality);
fflush(stdout);
}
}
// stop once below tolerance AND the decrease has saturated
if(quality<eps_pagerank && quality>old_quality*fak) break;
// if(quality<=0) break;
}
#pragma omp critical(print)
{
printf("%5d ",i);
printf("Convergence: %5d %5d %18.10lg %18.10lg\n",
i,l,quality,old_quality);
fflush(stdout);
}
// propagate the P-part and Q-part once more through G and project onto
// the reduced node set to obtain the matrix columns
net.GGmult(delta_alpha,s2,out,0);
for(j=0;j<nr;j++){
G_pr(j,i)=s2[node[j]];
}
net.GGmult(delta_alpha,s2,s,0);
for(j=0;j<nr;j++){
G_qr(j,i)=s2[node[j]];
}
out+=s;
net.GGmult(delta_alpha,s2,out,0);
for(j=0;j<nr;j++){
G_I(j,i)=s2[node[j]];
G_R(j,i)+=s2[node[j]];
}
}
}
#endif
/*****************************************/
/* Normalizes each column of 'a' to unit 1-norm (Google-matrix convention).
   Negative entries are replaced by their absolute value before summing. */
void renorm_google(matrix &a){
int n=a.x();
if(n!=a.y()){
printf("Warning: matrix not square in renorm_google.\n");
fflush(stdout);
}
for(int col=0;col<n;col++){
double colsum=0;
for(int row=0;row<n;row++){
if(a(row,col)<0){
a(row,col)=abs(a(row,col));
// flush kept from the original (left over from a disabled warning)
fflush(stdout);
}
colsum+=a(row,col);
}
double inv=1.0/colsum;
for(int row=0;row<n;row++) a(row,col)*=inv;
}
}
/*****************************************/
/* a = a + b for non-diagonal elements of b */
/* a += b restricted to off-diagonal entries; the diagonal of 'a' is kept. */
void add_non_diag(matrix &a,matrix &b){
int n=a.x();   // assumes both matrices are n x n -- TODO confirm
for(int r=0;r<n;r++){
for(int c=0;c<n;c++){
if(r!=c){
a(r,c)+=b(r,c);
}
}
}
}
#endif
|
GB_binop__second_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int64)
// A*D function (colscale): GB (_AxD__second_int64)
// D*A function (rowscale): GB (_DxB__second_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int64_t
// A type: int64_t
// A pattern? 1
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;
// true if values of A are not used
// Fix: the original had a stray trailing backslash after the "1" (and the
// "0" below), which line-spliced the NEXT comment line into the macro body.
// That only worked because comments are removed after splicing; the
// backslashes are dropped here so the macros end where they appear to.
#define GB_A_IS_PATTERN \
    1
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT64 || GxB_NO_SECOND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three int64 matrices are dense; the loop body comes
// from the shared template, specialized by the GB_* macros defined above
// (for SECOND, cij = bij).
void GB (_Cdense_ewise3_noaccum__second_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B sparse; returns GrB_NO_VALUE when this
// operator is compiled out via GB_DISABLE, letting the caller fall back to
// the generic kernel.
GrB_Info GB (_Cdense_accumB__second_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulate into a dense matrix). Note: the template block
// already returns GrB_SUCCESS, so the second return below it is unreachable
// (harmless; this file is auto-generated).
GrB_Info GB (_Cdense_accumb__second_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; writes directly
// into C->x, which the template assumes is already allocated.
GrB_Info GB (_AxD__second_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; writes directly
// into C->x, which the template assumes is already allocated.
GrB_Info GB (_DxB__second_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked). alpha/beta are read only for
// GxB_eWiseUnion; for plain eWiseAdd they stay uninitialized and unused by
// the template.
GrB_Info GB (_AaddB__second_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
GrB_Info GB (_AemultB_08__second_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. GB_BINOP_FLIP is 0
// for the SECOND operator (see macro above), so the flipxy branch compiles
// out and 'flipxy' is effectively ignored here.
GrB_Info GB (_AemultB_02__second_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M>=A.*B with M sparse/hyper and A,B bitmap/full.
GrB_Info GB (_AemultB_04__second_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap-format C, with optional (complemented) mask.
GrB_Info GB (_AemultB_bitmap__second_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
reduction-clauseModificado7.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
// Serial fallback: stub out the OpenMP runtime calls used by main() so the
// program also builds without -fopenmp.
#define omp_get_thread_num() 0
// Fix: omp_get_num_threads() is called in main() but had no serial stub,
// breaking non-OpenMP builds.
#define omp_get_num_threads() 1
#endif
/* Sums a[0..n-1] with a hand-rolled cyclic distribution over threads and a
   reduction(+) clause; n comes from argv[1], clamped to 20. */
int main(int argc, char **argv) {
// NOTE: a[n] is a VLA sized with the *initial* n (20); reassigning n below
// does not resize it, which is why n must be clamped to at most 20.
int i, n=20, a[n],suma=0;
if(argc < 2){
fprintf(stderr,"Falta iteraciones\n");
exit(-1);
}
n = atoi(argv[1]); if(n>20) {n=20; printf("n=%d\n",n);}  // fix: missing newline
for(i=0;i<n;i++) a[i]=i;
// each thread owns a private 'suma'; the copies are combined by reduction(+)
#pragma omp parallel private(i) reduction(+:suma)
{
for(i=omp_get_thread_num();i<n;i+=omp_get_num_threads()){
suma += a[i];
printf("\nthread %d suma a[%d] y suma vale: %d ", omp_get_thread_num(), i,suma);
}
}
printf("\nTras 'parallel' suma=%d\n",suma);
return 0;  // fix: explicit return (was falling off the end of main)
}
|
electrons.c | /*---------------------------------------------------------------------------------
ELECTRONS.C
-Initialize electron and gas entropies
-Assign electron and total entropies based on https://academic.oup.com/mnras/article/454/2/1848/2892599
---------------------------------------------------------------------------------*/
#include "decs.h"
#if ELECTRONS
// TODO put these in options with a default in decs.h
// Defined as in decs.h, CONSTANT not included in ALLMODELS version
// KAWAZURA is run by default if ALLMODELS=0
#define KAWAZURA 9
#define WERNER 10
#define ROWAN 11
#define SHARMA 12
#define CONSTANT 5 //tbh, this is never considered
void fixup_electrons_1zone(struct FluidState *S, int i, int j);
void heat_electrons_1zone(struct GridGeom *G, struct FluidState *Sh, struct FluidState *S, int i, int j);
double get_fels(struct GridGeom *G, struct FluidState *S, int i, int j, int model);
/* Initialize the total gas entropy KTOT and every electron-model entropy
   (KEL0..NVAR-1) from the fluid state, over all zones including ghosts. */
void init_electrons(struct GridGeom *G, struct FluidState *S)
{
ZLOOPALL {
// Set electron internal energy to constant fraction of internal energy
double uel = fel0*S->P[UU][j][i];
// Initialize entropies
S->P[KTOT][j][i] = (gam-1.)*S->P[UU][j][i]*pow(S->P[RHO][j][i],-gam);
// Initialize model entropy(ies)
for (int idx = KEL0; idx < NVAR ; idx++) {
S->P[idx][j][i] = (game-1.)*uel*pow(S->P[RHO][j][i],-game);
}
}
// Necessary? Usually called right afterward
set_bounds(G, S);
}
// TODO merge these
/* Apply electron heating in every physical zone, parallelized over the grid.
   Ss is the known (half-step) state, Sf the state being updated. */
void heat_electrons(struct GridGeom *G, struct FluidState *Ss, struct FluidState *Sf)
{
timer_start(TIMER_ELECTRON_HEAT);
#pragma omp parallel for collapse(2)
ZLOOP {
heat_electrons_1zone(G, Ss, Sf, i, j);
}
timer_stop(TIMER_ELECTRON_HEAT);
}
/* Single-zone electron heating: distribute the entropy generated between
   KTOT and the actual fluid entropy among the electron models, each with its
   own heating fraction fel, then reset KTOT to the actual entropy. */
inline void heat_electrons_1zone(struct GridGeom *G, struct FluidState *Ss, struct FluidState *Sf, int i, int j)
{
// Actual entropy at final time
double kHarm = (gam-1.)*Sf->P[UU][j][i]/pow(Sf->P[RHO][j][i],gam);
// Evolve model entropy(ies)
for (int idx = KEL0; idx < NVAR ; idx++) {
// fel = electron heating fraction for this model, evaluated on Ss
double fel = get_fels(G, Ss, i, j, idx);
Sf->P[idx][j][i] += (game-1.)/(gam-1.)*pow(Ss->P[RHO][j][i],gam-game)*fel*(kHarm - Sf->P[KTOT][j][i]);
}
// Reset total entropy
Sf->P[KTOT][j][i] = kHarm;
}
// New function for ALLMODELS runs.
/* Electron heating fraction fel for one zone under the chosen sub-grid
   model (KAWAZURA/WERNER/ROWAN/SHARMA; see citations inline). 'model' is
   also the primitive-variable index of that model's electron entropy.
   Returns 0 for unrecognized models and (optionally) in high-sigma zones. */
inline double get_fels(struct GridGeom *G, struct FluidState *S, int i, int j, int model)
{
get_state(G, S, i, j, CENT);
double bsq = bsq_calc(S, i, j);
double fel = 0.0;
if (model == KAWAZURA) {
// Equation (2) in http://www.pnas.org/lookup/doi/10.1073/pnas.1812491116
double Tpr = (gamp-1.)*S->P[UU][j][i]/S->P[RHO][j][i];
// recover electron internal energy from this model's entropy
double uel = 1./(game-1.)*S->P[model][j][i]*pow(S->P[RHO][j][i],game);
double Tel = (game-1.)*uel/S->P[RHO][j][i];
// floor temperatures to keep Trat/beta finite
if(Tel <= 0.) Tel = SMALL;
if(Tpr <= 0.) Tpr = SMALL;
double Trat = fabs(Tpr/Tel);
double pres = S->P[RHO][j][i]*Tpr; // Proton pressure
double beta = pres/bsq*2;
if(beta > 1.e20) beta = 1.e20;
double QiQe = 35./(1. + pow(beta/15.,-1.4)*exp(-0.1/Trat));
fel = 1./(1. + QiQe);
} else if (model == WERNER) {
// Equation (3) in http://academic.oup.com/mnras/article/473/4/4840/4265350
double sigma = bsq/S->P[RHO][j][i];
fel = 0.25*(1+pow(((sigma/5.)/(2+(sigma/5.))), .5));
} else if (model == ROWAN) {
// Equation (34) in https://iopscience.iop.org/article/10.3847/1538-4357/aa9380
double pres = (gamp-1.)*S->P[UU][j][i]; // Proton pressure
double pg = (gam-1)*S->P[UU][j][i];
double beta = pres/bsq*2;
// magnetization based on total enthalpy density
double sigma = bsq/(S->P[RHO][j][i]+S->P[UU][j][i]+pg);
double betamax = 0.25/sigma;
fel = 0.5*exp(-pow(1-beta/betamax, 3.3)/(1+1.2*pow(sigma, 0.7)));
} else if (model == SHARMA) {
// Equation for \delta on pg. 719 (Section 4) in https://iopscience.iop.org/article/10.1086/520800
double Tpr = (gamp-1.)*S->P[UU][j][i]/S->P[RHO][j][i];
double uel = 1./(game-1.)*S->P[model][j][i]*pow(S->P[RHO][j][i],game);
double Tel = (game-1.)*uel/S->P[RHO][j][i];
if(Tel <= 0.) Tel = SMALL;
if(Tpr <= 0.) Tpr = SMALL;
double Trat_inv = fabs(Tel/Tpr); //Inverse of the temperature ratio in KAWAZURA
double QeQi = 0.33 * pow(Trat_inv, 0.5);
fel = 1./(1.+1./QeQi);
}
#if SUPPRESS_HIGHB_HEAT
// no electron heating in strongly magnetized zones (sigma > 1)
if(bsq/S->P[RHO][j][i] > 1.) fel = 0;
#endif
return fel;
}
/* Clamp every zone's electron entropies to physical bounds (see the
   single-zone routine), parallelized over the grid. */
void fixup_electrons(struct FluidState *S)
{
timer_start(TIMER_ELECTRON_FIXUP);
#pragma omp parallel for collapse(2)
ZLOOP {
fixup_electrons_1zone(S, i, j);
}
timer_stop(TIMER_ELECTRON_FIXUP);
}
/* Clamp each electron entropy of one zone to the band implied by the
   allowed proton-to-electron temperature ratio [tptemin, tptemax], and
   replace NaNs with the cold-electron (minimum-entropy) value. */
inline void fixup_electrons_1zone(struct FluidState *S, int i, int j)
{
double rho_fac = pow(S->P[RHO][j][i], gam-game);
double game_term = (gam-1.)/(game-1.);
double k_hi = S->P[KTOT][j][i]*rho_fac/(tptemin*(gam-1.)/(gamp-1.) + game_term);
double k_lo = S->P[KTOT][j][i]*rho_fac/(tptemax*(gam-1.)/(gamp-1.) + game_term);
for (int idx = KEL0; idx < NVAR ; idx++) {
double kel = S->P[idx][j][i];
// Replace NANs with cold electrons
if (isnan(kel)) kel = k_lo;
// Enforce maximum Tp/Te (lower entropy bound)
if (kel < k_lo) kel = k_lo;
// Enforce minimum Tp/Te (upper entropy bound)
if (kel > k_hi) kel = k_hi;
S->P[idx][j][i] = kel;
}
}
#endif // ELECTRONS
|
diamond_ts.c | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include "mpi.h"
#include "data_structures.h"
// Scheduler slot states for the diamond time stepper
#define ST_BUSY (0)
#define ST_NOT_BUSY (1)
extern int get_ntg(Parameters);
extern void sub_array_copy_tg(const real_t * restrict src_buf, real_t * restrict dst_buf, int *src_size, int *dst_size, int *cpy_size, int *src_offs, int *dst_offs, int);
// Per-diamond scheduling state: time position and busy flag per slot
typedef struct{
volatile int *t_pos;
int *state;
} Diam_Sched_State;
// NOTE(review): these globals appear to be shared across threads; 'volatile'
// alone is not a synchronization primitive -- confirm the intended ordering.
volatile int *avail_list;
volatile uint64_t head, tail;
int diam_width;
Diam_Sched_State st;
int y_len_l, y_len_r;
int t_len;
int mpi_size;
// Halo-exchange staging buffers and outstanding MPI requests (left/right)
real_t *send_buf_l, *recv_buf_l, *send_buf_r, *recv_buf_r;
MPI_Request wait_req_send_l[2], wait_req_recv_l[2], wait_req_send_r[2], wait_req_recv_r[2];
// deprecated but can be useful for debugging
// deprecated but can be useful for debugging
/* Prologue trapezoid: alternately update H then E fields while shrinking the
   y-range by the stencil radius each half step (t_dim+1 steps total). */
void intra_diamond_trapzd_comp(Parameters *p, int yb, int ye){
int t;
// use all the threads in the initialization of the time stepper
// int swp_tgs = p->stencil_ctx.thread_group_size;
// p->stencil_ctx.thread_group_size = p->num_threads;
for(t=0; t<p->t_dim+1; t++){
// compute H-field
//printf("Prologue H -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, H_FIELD, p->stencil_ctx);
ye -= p->stencil.r;
// compute E-field
//printf("Prologue E -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
yb += p->stencil.r;
}
// p->stencil_ctx.thread_group_size = swp_tgs;
}
/* Epilogue (inverted trapezoid): one initial E update, then t_dim rounds of
   H/E updates with the y-range growing by the stencil radius each half step. */
void intra_diamond_inv_trapzd_comp(Parameters *p, int yb, int ye){
int t=0;
int t_dim = p->t_dim;
// use all the threads in the initialization of the time stepper
// int swp_tgs = p->stencil_ctx.thread_group_size;
// p->stencil_ctx.thread_group_size = p->num_threads;
//printf("Epilogue E -- t:%d yb:%d ye:%d\n", t, yb, ye);
// update the first E field of the prologue
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
ye += p->stencil.r;
for(t=0; t<t_dim; t++){
//printf("Epilogue H -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, H_FIELD, p->stencil_ctx);
yb -= p->stencil.r;
//printf("Epilogue E -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
ye += p->stencil.r;
}
// p->stencil_ctx.thread_group_size = swp_tgs;
}
/* Full diamond: 2*(t_dim+1) E/H half steps. During the lower half
   (t <= t_dim) the range grows by b_inc/e_inc; during the upper half it
   shrinks again, tracing the diamond shape in (y, t). */
void intra_diamond_comp(Parameters *p, int yb, int ye, int b_inc, int e_inc){
int t;
//printf("Main b_inc:%d e_inc:%d\n", b_inc, e_inc);
for(t=0; t< (p->t_dim+1)*2; t++){
// compute E-field
//printf("Main E -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
if(t <= p->t_dim){ // inverted trapezoid (or lower half of the diamond)
ye += e_inc;
}else{ // trapezoid (or upper half of the diamond)
yb += b_inc;
}
// compute H-field
//printf("Main H -- t:%d yb:%d ye:%d\n", t, yb, ye);
p->stencil.spt_blk_func(p->ldomain_shape, p->stencil.r, yb, p->stencil.r, p->lstencil_shape[0]+p->stencil.r, ye,
p->ldomain_shape[2]-p->stencil.r, p->coef, p->U1, p->U2, p->U3, H_FIELD, p->stencil_ctx);
if(t < p->t_dim){ // inverted trapezoid (or lower half of the diamond)
yb -= b_inc;
}else{ // trapezoid (or upper half of the diamond)
ye -= e_inc;
}
}
}
// SEND TO LEFT
// Post non-blocking sends of the U and V begin-halos to the left (down)
// neighbor, using the strided derived datatypes set up in p->hu/p->hv.
static inline void intra_diamond_strided_send_left(Parameters *p){
ierr = MPI_Isend(p->U1, 1, p->hu[1].send_hb, p->t.down, 0, p->t.cart_comm, &(wait_req_send_l[0])); CHKERR(ierr);
ierr = MPI_Isend(p->U2, 1, p->hv[1].send_hb, p->t.down, 0, p->t.cart_comm, &(wait_req_send_l[1])); CHKERR(ierr);
}
/* Concatenate the left (down) halos of U and V into one contiguous buffer,
   then post a single MPI_Isend instead of two strided sends.
   Fix: removed the unused locals 'offs', 'i', 'j', 'k'. */
static inline void intra_diamond_concat_send_left(Parameters *p, real_t *send_buf){
// concatenate the halo data then communicate contiguous data
// assuming same halo size for both U and V buffers
int h_size = p->hu[1].size;
int z_offs[] = {0,0,0};
int h_offs[] = {h_size,0,0};   // V halo is placed right after the U halo
if( p->t.down != MPI_PROC_NULL) {
// copy the left halo of U to the buffer
sub_array_copy_tg(p->U1, send_buf, p->ldomain_shape, p->hu[1].shape, p->hu[1].shape, p->hu[1].send_b, z_offs, p->stencil_ctx.thread_group_size);
// copy the left halo of V to the buffer
sub_array_copy_tg(p->U2, send_buf, p->ldomain_shape, p->hu[1].shape, p->hu[1].shape, p->hv[1].send_b, h_offs, p->stencil_ctx.thread_group_size);
}
// send the data out (a send to MPI_PROC_NULL completes as a no-op)
ierr = MPI_Isend(send_buf, 2*h_size, MPI_real_t, p->t.down , 0, p->t.cart_comm, &(wait_req_send_l[0])); CHKERR(ierr);
}
// Dispatch for the leftward halo send: strided-datatype sends when halo
// concatenation is disabled, otherwise a single concatenated send.
static inline void intra_diamond_send_left(Parameters *p, real_t *send_buf){
if(p->halo_concat == 0){
intra_diamond_strided_send_left(p);
} else{
// concatenated send to the left (the original comment here said
// "receive right side", which did not match the code)
intra_diamond_concat_send_left(p, send_buf);
}
}
// Complete the outstanding leftward send(s): two requests in strided mode,
// one in concatenated mode.
static inline void intra_diamond_wait_send_left(Parameters *p){
MPI_Status wait_stat[2];
MPI_Status wait_stat1;
if(p->halo_concat == 0){
ierr = MPI_Waitall(2, wait_req_send_l, wait_stat); CHKERR(ierr); // send wait
} else{
ierr = MPI_Wait(&(wait_req_send_l[0]), &wait_stat1); CHKERR(ierr); // wait send left
}
}
// SEND TO RIGHT
// Post non-blocking sends of the U and V end-halo regions to the "up"
// neighbor using the pre-built strided MPI derived datatypes (send_he).
// Requests land in wait_req_send_r[0..1]; complete them via
// intra_diamond_wait_send_right(). Sends to MPI_PROC_NULL are no-ops.
static inline void intra_diamond_strided_send_right(Parameters *p){
ierr = MPI_Isend(p->U1, 1, p->hu[1].send_he, p->t.up, 0, p->t.cart_comm, &(wait_req_send_r[0])); CHKERR(ierr); // U field halo
ierr = MPI_Isend(p->U2, 1, p->hv[1].send_he, p->t.up, 0, p->t.cart_comm, &(wait_req_send_r[1])); CHKERR(ierr); // V field halo
}
// Pack the U and V end-halos into one contiguous buffer, then post a single
// MPI_Isend to the "up" neighbor (request in wait_req_send_r[0]).
// Fix: removed the unused locals `offs`, `i`, `j`, `k` (compiler warnings).
static inline void intra_diamond_concat_send_right(Parameters *p, real_t *send_buf){
  // concatenate the halo data then communicate contiguous data
  // assuming same halo size for both U and V buffers
  int h_size = p->hu[1].size;
  int z_offs[] = {0,0,0};      // U halo goes at the start of the buffer
  int h_offs[] = {h_size,0,0}; // V halo follows U's halo
  if( p->t.up != MPI_PROC_NULL) {
    // copy the right halo of U to the buffer
    sub_array_copy_tg(p->U1, send_buf, p->ldomain_shape, p->hu[1].shape, p->hu[1].shape, p->hu[1].send_e, z_offs, p->stencil_ctx.thread_group_size);
    // copy the right halo of V to the buffer
    sub_array_copy_tg(p->U2, send_buf, p->ldomain_shape, p->hu[1].shape, p->hu[1].shape, p->hv[1].send_e, h_offs, p->stencil_ctx.thread_group_size);
  }
  // send the data out (sending to MPI_PROC_NULL is a no-op)
  ierr = MPI_Isend(send_buf, 2*h_size, MPI_real_t, p->t.up , 0, p->t.cart_comm, &(wait_req_send_r[0])); CHKERR(ierr);
}
// Dispatch the right-side halo send: pack-and-send a contiguous buffer when
// halo concatenation is enabled, otherwise use the strided MPI datatypes.
static inline void intra_diamond_send_right(Parameters *p, real_t *send_buf){
  if(p->halo_concat != 0){
    intra_diamond_concat_send_right(p, send_buf);
  } else {
    intra_diamond_strided_send_right(p);
  }
}
// Complete the outstanding right-side send request(s) posted by
// intra_diamond_send_right().
static inline void intra_diamond_wait_send_right(Parameters *p){
  MPI_Status stat_pair[2];
  MPI_Status stat_single;
  if(p->halo_concat != 0){
    // concatenated mode posts a single contiguous send
    ierr = MPI_Wait(&(wait_req_send_r[0]), &stat_single); CHKERR(ierr);
  } else {
    // strided mode posts one request per field (U and V)
    ierr = MPI_Waitall(2, wait_req_send_r, stat_pair); CHKERR(ierr);
  }
}
// RECV FROM LEFT
// Post non-blocking receives of the U and V begin-halo regions from the
// "down" neighbor using the pre-built strided MPI derived datatypes (recv_hb).
// Requests land in wait_req_recv_l[0..1]; complete them via
// intra_diamond_wait_recv_left(). Receives from MPI_PROC_NULL are no-ops.
static inline void intra_diamond_strided_recv_left(Parameters *p){
ierr = MPI_Irecv(p->U1, 1, p->hu[1].recv_hb, p->t.down, MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_l[0])); CHKERR(ierr); // U field halo
ierr = MPI_Irecv(p->U2, 1, p->hv[1].recv_hb, p->t.down, MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_l[1])); CHKERR(ierr); // V field halo
}
// Dispatch the left-side halo receive from the "down" neighbor: one
// contiguous receive holding both U and V halos when concatenation is
// enabled, otherwise strided per-field receives.
static inline void intra_diamond_recv_left(Parameters *p, real_t *recv_buf){
  if(p->halo_concat != 0){
    // single contiguous receive holding both the U and V halos back-to-back
    ierr = MPI_Irecv(recv_buf, 2*p->hu[1].size, MPI_real_t, p->t.down , MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_l[0])); CHKERR(ierr);
  } else {
    intra_diamond_strided_recv_left(p);
  }
}
// Complete the single contiguous receive from the "down" neighbor, then
// unpack the buffer into the U and V begin-halos.
// Fixes: (1) the unpack guard tested p->t.up, but the matching MPI_Irecv in
// intra_diamond_recv_left() receives from p->t.down — the old guard skipped
// the unpack on the last rank (losing received data) and ran it on rank 0
// (copying stale buffer contents); it now tests the actual source neighbor.
// (2) removed the unused locals `offs`, `i`, `j`, `k`.
static inline void intra_diamond_concat_wait_recv_left(Parameters *p, real_t *recv_buf){
  // assuming same halo size for both U and V buffers
  int h_size = p->hu[1].size;
  int z_offs[] = {0,0,0};      // U halo sits at the start of the buffer
  int h_offs[] = {h_size,0,0}; // V halo follows U's halo
  MPI_Status wait_stat;
  // Complete receiving to copy the buffer data
  ierr = MPI_Wait(&(wait_req_recv_l[0]), &wait_stat); CHKERR(ierr);
  if( p->t.down != MPI_PROC_NULL) { // data was actually received from "down"
    // copy the receive buffer to the left halo of U
    sub_array_copy_tg(recv_buf, p->U1, p->hu[1].shape, p->ldomain_shape, p->hu[1].shape, z_offs, p->hu[1].recv_b, p->stencil_ctx.thread_group_size);
    // copy the receive buffer to the left halo of V
    sub_array_copy_tg(recv_buf, p->U2, p->hu[1].shape, p->ldomain_shape, p->hu[1].shape, h_offs, p->hv[1].recv_b, p->stencil_ctx.thread_group_size);
  }
}
// Complete the outstanding left-side receive(s); in concatenated mode this
// also unpacks the buffer into the halo regions.
static inline void intra_diamond_wait_recv_left(Parameters *p, real_t *recv_buf){
  MPI_Status stat_pair[2];
  if(p->halo_concat != 0){
    intra_diamond_concat_wait_recv_left(p, recv_buf);
  } else {
    // strided mode posted one request per field (U and V)
    ierr = MPI_Waitall(2, wait_req_recv_l, stat_pair); CHKERR(ierr);
  }
}
// RECV FROM RIGHT
// Post non-blocking receives of the U and V end-halo regions from the "up"
// neighbor using the pre-built strided MPI derived datatypes (recv_he).
// Requests land in wait_req_recv_r[0..1]; complete them via
// intra_diamond_wait_recv_right(). Receives from MPI_PROC_NULL are no-ops.
static inline void intra_diamond_strided_recv_right(Parameters *p){
ierr = MPI_Irecv(p->U1, 1, p->hu[1].recv_he, p->t.up, MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_r[0])); CHKERR(ierr); // U field halo
ierr = MPI_Irecv(p->U2, 1, p->hv[1].recv_he, p->t.up, MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_r[1])); CHKERR(ierr); // V field halo
}
// Dispatch the right-side halo receive from the "up" neighbor: one
// contiguous receive holding both U and V halos when concatenation is
// enabled, otherwise strided per-field receives.
static inline void intra_diamond_recv_right(Parameters *p, real_t *recv_buf){
  if(p->halo_concat != 0){
    // single contiguous receive holding both the U and V halos back-to-back
    ierr = MPI_Irecv(recv_buf, 2*p->hu[1].size, MPI_real_t, p->t.up , MPI_ANY_TAG, p->t.cart_comm, &(wait_req_recv_r[0])); CHKERR(ierr);
  } else {
    intra_diamond_strided_recv_right(p);
  }
}
// Complete the single contiguous receive from the "up" neighbor, then unpack
// the buffer into the U and V end-halos.
// Fixes: (1) the unpack guard tested p->t.down, but the matching MPI_Irecv in
// intra_diamond_recv_right() receives from p->t.up — the guard now tests the
// actual source neighbor (mirror of the recv_left fix). (2) removed the
// unused locals `offs`, `i`, `j`, `k`.
static inline void intra_diamond_concat_wait_recv_right(Parameters *p, real_t *recv_buf){
  // assuming same halo size for both U and V buffers
  int h_size = p->hu[1].size;
  int z_offs[] = {0,0,0};      // U halo sits at the start of the buffer
  int h_offs[] = {h_size,0,0}; // V halo follows U's halo
  MPI_Status wait_stat;
  // Complete receiving to copy the buffer data
  ierr = MPI_Wait(&(wait_req_recv_r[0]), &wait_stat); CHKERR(ierr);
  if( p->t.up != MPI_PROC_NULL) { // data was actually received from "up"
    // copy the receive buffer to the right halo of U
    sub_array_copy_tg(recv_buf, p->U1, p->hu[1].shape, p->ldomain_shape, p->hu[1].shape, z_offs, p->hu[1].recv_e, p->stencil_ctx.thread_group_size);
    // copy the receive buffer to the right halo of V
    sub_array_copy_tg(recv_buf, p->U2, p->hu[1].shape, p->ldomain_shape, p->hu[1].shape, h_offs, p->hv[1].recv_e, p->stencil_ctx.thread_group_size);
  }
}
// Complete the outstanding right-side receive(s); in concatenated mode this
// also unpacks the buffer into the halo regions.
static inline void intra_diamond_wait_recv_right(Parameters *p, real_t *recv_buf){
  MPI_Status stat_pair[2];
  if(p->halo_concat != 0){
    intra_diamond_concat_wait_recv_right(p, recv_buf);
  } else {
    // strided mode posted one request per field (U and V)
    ierr = MPI_Waitall(2, wait_req_recv_r, stat_pair); CHKERR(ierr);
  }
}
// circular buffer
#define T_POS_L(y) (st.t_pos[(((y)+(y_len_l))%(y_len_l))])
#define T_POS_R(y) (st.t_pos[(((y)+(y_len_r))%(y_len_r))])
// Advance the dependency-tracking state after the tile at y_coord completed
// one time step, and append every tile whose dependencies just became
// satisfied to the circular ready queue avail_list (advancing head).
// Must be called inside the producer critical section: it mutates the shared
// st.t_pos[], avail_list[] and head.
// Fix: replaced bitwise `&` with logical `&&` in the dependency tests — both
// operands are 0/1 comparison results so behavior is unchanged, but `&&` is
// the intended idiom and avoids surprises if an operand ever changes.
static inline void update_state(int y_coord, Parameters *p){
  int sh;
  st.t_pos[y_coord]++; // advance the current tile in time
  if(p->is_last != 1) {
    sh = ((st.t_pos[y_coord]%2 == 0) ? 1 : -1); // define the dependency direction
    // add the current tile to the ready queue if its dependency is satisfied
    if( (T_POS_L(y_coord+sh) >= st.t_pos[y_coord]) && (st.t_pos[y_coord] < t_len) )
    {
      avail_list[head%y_len_r] = y_coord;
      head++;
    }
    // add the dependent tile to the ready queue if its other dependency is satisfied
    if( (T_POS_L(y_coord-sh) == st.t_pos[y_coord]) && (T_POS_L(y_coord-sh) < t_len) )
    {
      avail_list[head%y_len_r] = (y_coord - sh + y_len_l)%y_len_l; // add the dependent neighbor to the list if the dependency is satisfied
      head++;
    }
  } else { // last process (and single process case)
    if(st.t_pos[y_coord]%2 == 0){ // right row case
      // add the current diamond to the ready queue if dependencies are satisfied
      if(st.t_pos[y_coord] < t_len){
        // if left-half diamond, no dependencies. Add to the list
        if(y_coord == y_len_l-1){
          avail_list[head%y_len_r] = y_coord;
          head++;
        } else if(T_POS_R(y_coord+1) >= st.t_pos[y_coord]) {
          // the rest have the same circular dependence (except the right-half diamond) if:
          // 1) the current tile did not reach the end of the temporal dimension
          // 2) the right neighbor is at least at the same time step
          avail_list[head%y_len_r] = y_coord;
          head++;
        }
      } // check validity in range of temporal dimension
      // add the dependent diamond to the ready queue if other dependencies are satisfied:
      if (T_POS_R(y_coord-1) < t_len){
        // add the right-half diamond automatically when the left most diamond is updated
        if(y_coord == 0){ // no dependencies. Add to the list
          st.t_pos[y_len_r-1]++; // advance the right-half diamond in time
          avail_list[head%y_len_r] = y_len_r-1;
          head++;
        }
        else if(T_POS_R(y_coord-1) == st.t_pos[y_coord]) {
          // 1) the neighbor did not reach the end of the temporal dimension
          // 2) the left neighbor is at the same time step
          // 3) is not the right-half diamond
          avail_list[head%y_len_r] = (y_coord - 1 + y_len_r)%y_len_r; // add the dependent neighbor to the list if the dependency is satisfied
          head++;
        }
      } // check validity in temporal dimension
    } //end right row case
    else if(st.t_pos[y_coord]%2 == 1){ // left row
      // add the current diamond to the ready queue if dependencies are satisfied:
      if( (T_POS_R(y_coord-1) >= st.t_pos[y_coord]) && (st.t_pos[y_coord] < t_len) && (y_coord != y_len_r-1) ) {
        // 1) the left neighbor is at least at the same time step
        // 2) the current diamond did not reach the end of the temporal dimension
        // 3) is not the right-half diamond
        avail_list[head%y_len_r] = y_coord;
        head++;
      }
      // add the dependent diamond to the ready queue if other dependencies are satisfied:
      if( (T_POS_R(y_coord+1) == st.t_pos[y_coord]) && (T_POS_R(y_coord+1) < t_len) && (y_coord != y_len_l-1) ) {
        // 1) the right neighbor is at the same time step
        // 2) the neighbor did not reach the end of the temporal dimension
        // 3) is not the right most diamond in space
        avail_list[head%y_len_r] = (y_coord + 1 + y_len_r)%y_len_r; // add the dependent neighbor to the list if the dependency is satisfied
        head++;
      }
    } // end left row case
  } //end is_last process case
}
/*void comm_dead_lock_test(MPI_Request *req, int rank, int y_coord, int t_coord, char* source) {
double db_t;
int comm_test, comm_not_complete;
MPI_Status wait_stat1;
db_t= MPI_Wtime();
comm_not_complete=1;
while (comm_not_complete) {
MPI_Test(&(req[0]), &comm_test, &wait_stat1);
if(comm_test) comm_not_complete = 0;
// assume deadlock if communication takes more than 10 seconds
else if(MPI_Wtime()-db_t > 10){
printf("[%d] DEADLOCK at %s wait t_pos[%d]=%d\n", rank, source, y_coord, t_coord);
db_t = MPI_Wtime();
}
}
}*/
// Exchange the freshly computed halo data with the y-dimension neighbors,
// but only for boundary diamonds: the right-most diamond of a right-shifted
// row (even t_coord) exchanges with the "up" neighbor / receives from "down",
// and the left-most diamond of a left-shifted row (odd t_coord) does the
// mirror exchange. Sends and receives are posted first so they overlap,
// then completed in order. No-op for a single-process y decomposition.
static inline void intra_diamond_comm(Parameters *p, int y_coord, int t_coord){
// Start exchanging computed halo data
if(p->t.shape[1] > 1){
if( (y_coord == y_len_r-1) && (t_coord%2 == 0) ) { // right most diamond
intra_diamond_send_right(p, send_buf_r);
intra_diamond_recv_left (p, recv_buf_l);
// comm_dead_lock_test(wait_req_send_r, p->mpi_rank, y_coord, t_coord, "send right");
intra_diamond_wait_send_right(p);
// comm_dead_lock_test(wait_req_recv_l, p->mpi_rank, y_coord, t_coord, "recv left");
intra_diamond_wait_recv_left (p, recv_buf_l);
} else if( (y_coord == 0) && (t_coord%2 == 1) ){ // left most diamond
intra_diamond_send_left (p, send_buf_l);
intra_diamond_recv_right(p, recv_buf_r);
// comm_dead_lock_test(wait_req_send_l, p->mpi_rank, y_coord, t_coord, "send left");
intra_diamond_wait_send_left (p);
// comm_dead_lock_test(wait_req_recv_r, p->mpi_rank, y_coord, t_coord, "recv right");
intra_diamond_wait_recv_right(p, recv_buf_r);
}
}
}
// REGULAR-stencil multi-thread wavefront diamond (MWD) computation of one
// diamond tile over time steps [tb, te): a wavefront prologue ramps up at the
// lower z boundary, the main kernel sweeps the bulk of z, and an epilogue
// drains the wavefront at the upper z boundary. Per-phase wall-clock time is
// accumulated into the thread's stencil_ctx counters.
// Fix: removed the unused local `z`.
void intra_diamond_mwd_comp_std(Parameters *p, int yb_r, int ye_r, int b_inc, int e_inc, int tb, int te, int tid){
  int t, zb, ze;
  int yb, ye;
  int time_len = te-tb;
  double t1, t2, t3;
  // wavefront prologue: z-range shrinks as the wavefront ramps up
  t1 = MPI_Wtime();
  yb = yb_r;
  ye = ye_r;
  zb = p->stencil.r;
  for(t=tb; t< te-1; t++){
    ze = p->stencil.r*(time_len-(t-tb));
    // U1/U2 alternate as destination/source on odd/even time steps
    if(t%2 == 1){
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U1, p->U2, p->U3, ALL_FIELDS, p->stencil_ctx);
    }else{
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U2, p->U1, p->U3, ALL_FIELDS, p->stencil_ctx);
    }
    if(t< p->t_dim){ // inverted trapezoid (or lower half of the diamond)
      yb -= b_inc;
      ye += e_inc;
    }else{ // trapezoid (or upper half of the diamond)
      yb += b_inc;
      ye -= e_inc;
    }
  }
  t2 = MPI_Wtime();
  // main wavefront loop over the bulk of the z dimension
  yb = yb_r;
  ye = ye_r;
  zb = (te-tb)*p->stencil.r;
  ze = p->ldomain_shape[2]-p->stencil.r;
  p->stencil.mwd_func(p->ldomain_shape, p->stencil.r, yb, zb,
      p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U1, p->U2, p->U3, p->t_dim, b_inc, e_inc, p->stencil.r, tb, te, p->stencil_ctx, tid);
  t3 = MPI_Wtime();
  // wavefront epilogue: z-range shrinks from below as the wavefront drains
  yb = yb_r;
  ye = ye_r;
  ze = p->ldomain_shape[2]-p->stencil.r;
  for(t=tb+1; t< te; t++){
    if((t-1)< p->t_dim){ // lower half of the diamond
      yb -= b_inc;
      ye += e_inc;
    }else{ // upper half of the diamond
      yb += b_inc;
      ye -= e_inc;
    }
    zb = p->ldomain_shape[2]-p->stencil.r - (t-tb)*p->stencil.r;
    if(t%2 == 1){
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U1, p->U2, p->U3, ALL_FIELDS, p->stencil_ctx);
    }else{
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U2, p->U1, p->U3, ALL_FIELDS, p->stencil_ctx);
    }
  }
  p->stencil_ctx.t_wf_prologue[tid] += t2-t1;
  p->stencil_ctx.t_wf_main[tid] += t3-t2;
  p->stencil_ctx.t_wf_epilogue[tid] += MPI_Wtime() - t3;
}
// SOLAR-stencil (leapfrog E/H field) multi-thread wavefront diamond
// computation over time steps [tb, te): prologue, main kernel, epilogue,
// updating the E-field and H-field in alternation with their own y-range
// shifts. Per-phase timings go into the thread's stencil_ctx counters.
// Fixes: (1) bitwise `|` replaced with logical `||` in the two guard
// conditions — operands are 0/1 comparison results so behavior is unchanged,
// but `||` is the intended operator; (2) removed the unused local `z`.
void intra_diamond_mwd_comp_solar(Parameters *p, int yb_r, int ye_r, int b_inc, int e_inc, int tb, int te, int tid){
  int t, zb, ze;
  int yb, ye;
  int time_len = te-tb;
  double t1, t2, t3;
  // wavefront prologue
  t1 = MPI_Wtime();
  yb = yb_r;
  ye = ye_r;
  zb = p->stencil.r;
  for(t=tb; t< te-1; t++){
    ze = p->stencil.r*(time_len-(t-tb));
    // compute E-field
    //printf("Main E -- t:%d yb:%d ye:%d\n", t, yb, ye);
    if( (yb<ye) && (tb==0 || t!=tb)) // Update E-field when more than slice available and not first iteration of the prologue
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r,
          ye, ze, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
    if(t <= p->t_dim) ye += e_inc; // lower half of the diamond
    else yb += b_inc; // upper half of the diamond
    // compute H-field
    //printf("Main H -- t:%d yb:%d ye:%d\n", t, yb, ye);
    if(yb<ye) p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r,
        ye, ze, p->coef, p->U1, p->U2, p->U3, H_FIELD, p->stencil_ctx);
    if(t < p->t_dim) yb -= b_inc; // lower half of the diamond
    else ye -= e_inc; // upper half of the diamond
  }
  t2 = MPI_Wtime();
  // main wavefront loop
  yb = yb_r;
  ye = ye_r;
  zb = (te-tb)*p->stencil.r;
  ze = p->ldomain_shape[2]-p->stencil.r;
  p->stencil.mwd_func(p->ldomain_shape, p->stencil.r, yb, zb,
      p->lstencil_shape[0]+p->stencil.r, ye, ze, p->coef, p->U1, p->U2, p->U3, p->t_dim, b_inc, e_inc, p->stencil.r, tb, te, p->stencil_ctx, tid);
  t3 = MPI_Wtime();
  // wavefront epilogue
  yb = yb_r;
  ye = ye_r;
  ze = p->ldomain_shape[2]-p->stencil.r;
  // Update E shift
  if(tb <= p->t_dim) ye += e_inc; // lower half of the diamond
  else yb += b_inc; // upper half of the diamond
  // Update H shift
  if(tb < p->t_dim) yb -= b_inc; // lower half of the diamond
  else ye -= e_inc; // upper half of the diamond
  for(t=tb+1; t< te; t++){
    zb = p->ldomain_shape[2]-p->stencil.r - (t-tb)*p->stencil.r;
    // compute E-field
    //printf("Main E -- t:%d yb:%d ye:%d\n", t, yb, ye);
    if(yb<ye) p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r,
        ye, ze, p->coef, p->U1, p->U2, p->U3, E_FIELD, p->stencil_ctx);
    if(t <= p->t_dim) ye += e_inc; // lower half of the diamond
    else yb += b_inc; // upper half of the diamond
    // compute H-field
    //printf("Main H -- t:%d yb:%d ye:%d\n", t, yb, ye);
    // More than slice available and not epilogue's last time step
    if( (yb<ye) && (te>(p->t_dim*2) || t!=te-1) )
      p->stencil.stat_sched_func(p->ldomain_shape, p->stencil.r, yb, zb, p->lstencil_shape[0]+p->stencil.r,
          ye, ze, p->coef, p->U1, p->U2, p->U3, H_FIELD, p->stencil_ctx);
    if(t < p->t_dim) yb -= b_inc; // lower half of the diamond
    else ye -= e_inc; // upper half of the diamond
  }
  p->stencil_ctx.t_wf_prologue[tid] += t2-t1;
  p->stencil_ctx.t_wf_main[tid] += t3-t2;
  p->stencil_ctx.t_wf_epilogue[tid] += MPI_Wtime() - t3;
}
// Derive the y-range [*yb, *ye) and the begin/end slope increments for the
// diamond tile at (y_coord, t_coord), REGULAR-stencil variant. Also credits
// the resolved-diamond counter for this thread (half diamonds count 0.5).
static inline void intra_diamond_get_info_std(Parameters *p, int y_coord, int tid, int t_coord, int *yb, int *ye, int *b_inc, int *e_inc){
  const int r = p->stencil.r;
  const int right_row = (t_coord%2 == 0); // rows alternate right/left shifted
  double resolved;
  if( (p->is_last == 1) && right_row && (y_coord == y_len_l-1) ){
    // right-most process, left-half diamond: begin edge slopes, end edge fixed
    *yb = p->lstencil_shape[1]; // == r + lstencil_shape[1] - r
    *ye = *yb + r;
    *b_inc = r;
    *e_inc = 0;
    resolved = 0.5;
  }else if( (p->is_last == 1) && right_row && (y_coord == y_len_r-1) ){
    // right-most process, right-half diamond: begin edge fixed, end edge slopes
    *b_inc = 0;
    *e_inc = r;
    // serial runs start at the domain boundary instead of past the halo
    *yb = (p->t.shape[1] > 1) ? (r + p->lstencil_shape[1] + 2*r) : r;
    *ye = *yb + r;
    resolved = 0.5;
  }else{
    // full diamond computation
    *yb = (right_row ? diam_width : diam_width/2) + y_coord*diam_width; // == r + shift - r + y*width
    *ye = *yb + 2*r;
    *b_inc = r;
    *e_inc = r;
    resolved = 1.0;
  }
  p->stencil_ctx.wf_num_resolved_diamonds[tid] += resolved;
}
// Dispatch the MWD diamond computation to the variant matching the stencil
// type (REGULAR or SOLAR); other types are silently ignored, as before.
void intra_diamond_mwd_comp(Parameters *p, int yb, int ye, int b_inc, int e_inc, int tb, int te, int tid){
  if(p->stencil.type == SOLAR){
    intra_diamond_mwd_comp_solar(p, yb, ye, b_inc, e_inc, tb, te, tid);
  } else if(p->stencil.type == REGULAR){
    intra_diamond_mwd_comp_std(p, yb, ye, b_inc, e_inc, tb, te, tid);
  }
}
// Derive the y-range [*yb, *ye) and the begin/end slope increments for the
// diamond tile at (y_coord, t_coord), SOLAR-stencil variant (narrower y span
// than the REGULAR variant). Also credits the resolved-diamond counter.
static inline void intra_diamond_get_info_solar(Parameters *p, int y_coord, int tid, int t_coord, int *yb, int *ye, int *b_inc, int *e_inc){
  const int r = p->stencil.r;
  const int right_row = (t_coord%2 == 0); // rows alternate right/left shifted
  double resolved;
  if( (p->is_last == 1) && right_row && (y_coord == y_len_l-1) ){
    // right-most process, left-half diamond: begin edge slopes, end edge fixed
    *yb = p->lstencil_shape[1]; // == r + lstencil_shape[1] - r
    *ye = *yb + r;
    *b_inc = r;
    *e_inc = 0;
    resolved = 0.5;
  }else if( (p->is_last == 1) && right_row && (y_coord == y_len_r-1) ){
    // right-most process, right-half diamond: begin edge fixed, end edge slopes
    *b_inc = 0;
    *e_inc = r;
    // serial runs start at the domain boundary instead of past the halo
    *yb = (p->t.shape[1] > 1) ? (r + p->lstencil_shape[1] + 2*r) : r;
    *ye = *yb; // starts empty (grows via e_inc), unlike the REGULAR variant
    resolved = 0.5;
  }else{
    // full diamond computation
    *yb = (right_row ? diam_width : diam_width/2) + y_coord*diam_width; // == r + shift - r + y*width
    *ye = *yb + r;
    *b_inc = r;
    *e_inc = r;
    resolved = 1.0;
  }
  p->stencil_ctx.wf_num_resolved_diamonds[tid] += resolved;
}
// Resolve the tile geometry for (y_coord, t_coord) and run the full MWD
// computation for it, choosing the geometry helper and the temporal extent
// by stencil type (REGULAR spans 2*t_dim+1 steps, SOLAR spans 2*(t_dim+1)).
static inline void intra_diamond_comp_using_location(Parameters *p, int y_coord, int tid, int t_coord){
  int yb, ye, b_inc, e_inc;
  if(p->stencil.type == REGULAR){
    intra_diamond_get_info_std(p, y_coord, tid, t_coord, &yb, &ye, &b_inc, &e_inc);
    intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, 0, 2*p->t_dim + 1, tid);
  }else if(p->stencil.type == SOLAR){
    intra_diamond_get_info_solar(p, y_coord, tid, t_coord, &yb, &ye, &b_inc, &e_inc);
    intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, 0, 2*(p->t_dim + 1), tid);
  }
}
// Resolve one ready tile: compute its diamond at the tile's current time
// position, then exchange the freshly computed halos, charging the
// communication time to this thread's t_wf_comm counter.
static inline void intra_diamond_resolve(Parameters *p, int y_coord, int tid){
  const int t_coord = st.t_pos[y_coord];
  double comm_start, comm_end;
  intra_diamond_comp_using_location(p, y_coord, tid, t_coord);
  comm_start = MPI_Wtime();
  intra_diamond_comm(p, y_coord, t_coord);
  comm_end = MPI_Wtime();
  p->stencil_ctx.t_wf_comm[tid] += comm_end - comm_start;
}
// Work-stealing main loop: each thread group repeatedly acquires a ready
// tile from the shared circular queue (avail_list, guarded by head/tail and
// critical sections), resolves it, then publishes newly-ready tiles via
// update_state(). The omp-for over diam_size bounds the total tile count.
// Fix: `t1` was a function-scope variable shared by all threads in the
// parallel region (it was not in the private clause), so concurrent writes
// raced and corrupted the t_group_wait timings; it is now declared inside
// the parallel region, making it per-thread.
void dynamic_intra_diamond_main_loop(Parameters *p){
  int not_complete, th_y_coord, i;
  uint64_t il;
  int num_thread_groups = get_ntg(*p);
  // total number of diamonds to resolve across the whole run
  uint64_t diam_size = y_len_l*(t_len-1)/2 + y_len_r*((t_len-1)/2 +1);
  int tid;
  int idx=0;
  if(p->in_auto_tuning == 0) {
    for(i=0; i<y_len_r; i++){
      avail_list[i] = i;
    }
  } else { // diversify the startup for shorter autotuning
    for(i=0; i<y_len_r; i++){
      if(i%2==0){
        avail_list[i] = idx++;
      }
    }
    for(i=0; i<y_len_r; i++){
      if(i%2==1){
        avail_list[i] = idx++;
      }
    }
    // for(i=0; i<y_len_r; i++) printf("i:%d list:%d\n", i, avail_list[i]);
  }
#pragma omp parallel num_threads(num_thread_groups) shared(head, tail) private(tid) PROC_BIND(spread)
  {
    double t1; // per-thread wait timer (was erroneously shared)
    // // initialize the likwid markers according to the openmp nested parallelism
    // if(p->in_auto_tuning == 0) {
    // #pragma omp parallel num_threads(p->stencil_ctx.thread_group_size) PROC_BIND(master)
    // {
    // LIKWID_MARKER_THREADINIT;
    // MARKER_START("calc");
    // }
    // }
    tid = 0;
#if defined(_OPENMP)
    tid = omp_get_thread_num();
#endif
#pragma omp for schedule(dynamic) private(il, th_y_coord, not_complete)//shared(head,tail)
    for (il=0; il<diam_size; il++){
      not_complete = 1;
      th_y_coord = -1;
      while(not_complete)
      {
        t1 = MPI_Wtime();
        // NOTE(review): this spin reads shared head/tail outside any flush or
        // atomic; it appears to rely on the critical sections below for the
        // authoritative check — confirm head/tail are declared volatile/atomic
        while(head-tail<1); // spin-wait for available tasks
        p->stencil_ctx.t_group_wait[tid] += (MPI_Wtime() - t1);
#pragma omp critical// (consumer)
        {
#pragma omp flush (head, tail)
          if(head-tail>0){ // make sure there is still available work
            th_y_coord = avail_list[tail%y_len_r]; //acquire task
            tail++;
          }
        }
        if(th_y_coord>=0){
          intra_diamond_resolve(p, th_y_coord, tid);
#pragma omp critical// (producer)
          {
#pragma omp flush (head)
            update_state(th_y_coord, p);
          }
          not_complete = 0;
        }
      }
    }
    // // stop the markers of the experiment
    // if(p->in_auto_tuning == 0) {
    // #pragma omp parallel num_threads(p->stencil_ctx.thread_group_size) PROC_BIND(master)
    // {
    // MARKER_STOP("calc");
    // }
    // }
  }
}
// Prologue for the REGULAR stencil: compute the initial inverted trapezoids
// (the upper halves of the first diamond row, time steps [t_dim, 2*t_dim+1))
// in parallel across thread groups, then exchange the resulting boundary
// data with the left ("down") neighbor.
void dynamic_intra_diamond_prologue_std(Parameters *p){
// compute all the trapezoids
int i, yb, ye;
int ntg = get_ntg(*p);
#pragma omp parallel num_threads(ntg) PROC_BIND(spread)
{
int b_inc = p->stencil.r;
int e_inc = p->stencil.r;
int tid = 0;
#if defined(_OPENMP)
tid = omp_get_thread_num();
#endif
#pragma omp for schedule(dynamic) private(i,yb,ye)
for(i=0; i<y_len_l; i++){
yb = p->stencil.r + i*diam_width; // each trapezoid spans one diamond width
ye = yb + diam_width;
intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, p->t_dim, p->t_dim*2+1, tid);
}
}
// Send the trapezoid results to the left
if(p->t.shape[1] > 1){
intra_diamond_send_left (p, send_buf_l);
intra_diamond_recv_right(p, recv_buf_r);
intra_diamond_wait_send_left (p);
intra_diamond_wait_recv_right(p, recv_buf_r);
}
}
// Prologue for the SOLAR stencil. Identical structure to the REGULAR
// prologue except for the narrower y-range (ye-1) and the extra time step
// (te = 2*t_dim+2) required by the leapfrog E/H update.
void dynamic_intra_diamond_prologue_solar(Parameters *p){
// The only difference compared to std approach is ye-1 and te+1
// compute all the trapezoids
int i, yb, ye;
int ntg = get_ntg(*p);
#pragma omp parallel num_threads(ntg) PROC_BIND(spread)
{
int b_inc = p->stencil.r;
int e_inc = p->stencil.r;
int tid = 0;
#if defined(_OPENMP)
tid = omp_get_thread_num();
#endif
#pragma omp for schedule(dynamic) private(i,yb,ye)
for(i=0; i<y_len_l; i++){
yb = p->stencil.r + i*diam_width; // each trapezoid spans one diamond width
ye = yb + diam_width-1;
intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, p->t_dim, p->t_dim*2+2, tid);
}
}
// Send the trapezoid results to the left
if(p->t.shape[1] > 1){
intra_diamond_send_left (p, send_buf_l);
intra_diamond_recv_right(p, recv_buf_r);
intra_diamond_wait_send_left (p);
intra_diamond_wait_recv_right(p, recv_buf_r);
}
}
// Run the prologue variant matching the stencil type; other types are
// silently ignored, as before.
void dynamic_intra_diamond_prologue(Parameters *p){
  if(p->stencil.type == SOLAR){
    dynamic_intra_diamond_prologue_solar(p);
  } else if(p->stencil.type == REGULAR){
    dynamic_intra_diamond_prologue_std(p);
  }
}
// Epilogue for the REGULAR stencil: compute the remaining lower half
// diamonds (time steps [0, t_dim+1)) in parallel across thread groups to
// bring every y tile to the final time step.
void dynamic_intra_diamond_epilogue_std(Parameters *p){
int yb, ye, i;
int ntg = get_ntg(*p);
#pragma omp parallel num_threads(ntg) PROC_BIND(spread)
{
int b_inc = p->stencil.r;
int e_inc = p->stencil.r;
int yb_r = p->stencil.r + diam_width/2 - p->stencil.r; // first tile's base y
int ye_r = yb_r + 2*p->stencil.r;
int tid = 0;
#if defined(_OPENMP)
tid = omp_get_thread_num();
#endif
#pragma omp for schedule(dynamic) private(i,yb,ye)
for(i=0; i<y_len_l; i++){
yb = yb_r + i*diam_width; // shift the base range by one diamond per tile
ye = ye_r + i*diam_width;
intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, 0, p->t_dim+1, tid);
}
}
}
// Epilogue for the SOLAR stencil. Identical structure to the REGULAR
// epilogue except for the narrower initial y-range (ye_r = yb_r + r instead
// of yb_r + 2*r), matching the SOLAR diamond geometry.
void dynamic_intra_diamond_epilogue_solar(Parameters *p){
int yb, ye, i;
int ntg = get_ntg(*p);
#pragma omp parallel num_threads(ntg) PROC_BIND(spread)
{
int b_inc = p->stencil.r;
int e_inc = p->stencil.r;
int yb_r = p->stencil.r + diam_width/2 - p->stencil.r; // first tile's base y
int ye_r = yb_r + p->stencil.r; // difference comp. to std
int tid = 0;
#if defined(_OPENMP)
tid = omp_get_thread_num();
#endif
#pragma omp for schedule(dynamic) private(i,yb,ye)
for(i=0; i<y_len_l; i++){
yb = yb_r + i*diam_width; // shift the base range by one diamond per tile
ye = ye_r + i*diam_width;
intra_diamond_mwd_comp(p, yb, ye, b_inc, e_inc, 0, p->t_dim+1, tid);
}
}
}
// Run the epilogue variant matching the stencil type; other types are
// silently ignored, as before.
void dynamic_intra_diamond_epilogue(Parameters *p){
  if(p->stencil.type == SOLAR){
    dynamic_intra_diamond_epilogue_solar(p);
  } else if(p->stencil.type == REGULAR){
    dynamic_intra_diamond_epilogue_std(p);
  }
}
// Top-level driver for the dynamic intra-diamond time stepper: derive the
// tiling geometry (diam_width, t_len, y_len_l/r), allocate and seed the
// scheduling state and (optionally) the halo-concatenation buffers, run
// prologue / main loop / epilogue with profiling, then release everything.
// Fix: removed the unused locals `y`, `t`, `yb`, `ye`, `db_t`.
void dynamic_intra_diamond_ts(Parameters *p) {
  int t_dim = p->t_dim;
  diam_width = (t_dim+1) * 2 *p->stencil.r;
  // number of time rows; REGULAR excludes the two boundary time steps
  if(p->stencil.type == REGULAR){
    t_len = 2*( (p->nt-2)/((t_dim+1)*2) ) - 1;
  } else if(p->stencil.type == SOLAR){
    t_len = 2*( (p->nt)/((t_dim+1)*2) ) - 1;
  }
  int num_thread_groups = get_ntg(*p);
  y_len_l = p->lstencil_shape[1] / (diam_width);
  y_len_r = y_len_l;
  if(p->is_last == 1) y_len_r++; // last process also owns the split half-diamond
  int i;
  double t1,t2,t3,t4;
  // allocate scheduling variables
  st.t_pos = (int*) malloc(y_len_r*sizeof(int));
  st.state = (int*) malloc(y_len_r*sizeof(int));
  avail_list = (int*) malloc(y_len_r*sizeof(int));
  head=y_len_r; // the queue starts pre-filled with one entry per y tile
  tail=0;
  // initialize scheduling variables
  for(i=0; i<y_len_r; i++){
    st.t_pos[i] = 0;
    st.state[i] = ST_NOT_BUSY;
  }
  // create buffers to aggregate halo data for communication
  int comm_buf_size;
  if (p->halo_concat ==1){
    // assuming same halo size for both U and V buffers
    comm_buf_size = 2 * p->hu[1].shape[0] * p->hu[1].shape[1] * p->hu[1].shape[2];
    // NOTE(review): posix_memalign return values are unchecked; a failure
    // would surface later as a crash — consider CHKERR-style handling
    posix_memalign((void **)&(recv_buf_l), p->alignment, sizeof(real_t)*comm_buf_size);
    posix_memalign((void **)&(recv_buf_r), p->alignment, sizeof(real_t)*comm_buf_size);
    posix_memalign((void **)&(send_buf_l), p->alignment, sizeof(real_t)*comm_buf_size);
    posix_memalign((void **)&(send_buf_r), p->alignment, sizeof(real_t)*comm_buf_size);
  }
#if defined(_OPENMP)
  omp_set_nested(1); // thread groups use nested parallel regions
#endif
  // initialize the likwid markers according to the openmp nested parallelism
  if(p->in_auto_tuning == 0) {
#pragma omp parallel num_threads(num_thread_groups) PROC_BIND(spread)
    {
#pragma omp parallel num_threads(p->stencil_ctx.thread_group_size) PROC_BIND(master)
      {
        LIKWID_MARKER_THREADINIT;
        MARKER_START("calc");
      }
    }
  }
  // Prologue (skipped while auto-tuning)
  t1 = MPI_Wtime();
  if(p->in_auto_tuning == 0)
    dynamic_intra_diamond_prologue(p);
  t2 = MPI_Wtime();
  // main loop
  dynamic_intra_diamond_main_loop(p);
  t3 = MPI_Wtime();
  // Epilogue (skipped while auto-tuning)
  if(p->in_auto_tuning == 0)
    dynamic_intra_diamond_epilogue(p);
  t4 = MPI_Wtime();
  // stop the markers of the experiment
  if(p->in_auto_tuning == 0) {
#pragma omp parallel num_threads(num_thread_groups) PROC_BIND(spread)
    {
#pragma omp parallel num_threads(p->stencil_ctx.thread_group_size) PROC_BIND(master)
      {
        MARKER_STOP("calc");
      }
    }
  }
  p->prof.ts_main += (t3-t2);
  p->prof.ts_others += (t2-t1) + (t4-t3);
  // clean up the buffers that aggregate halo data for communication
  if (p->halo_concat ==1){
    free(recv_buf_l);
    free(recv_buf_r);
    free(send_buf_l);
    free(send_buf_r);
  }
  // cleanup the state variables
  free((void *) st.t_pos);
  free(st.state);
  free((void *) avail_list);
}
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include <functional>
#include <memory>
// ORT thread pool overview
// ------------------------
//
// The ORT thread pool implementation is split into two layers. This
// file provides the high-level component. See the accompanying
// comments in EigenNonBlockingThreadPool.h for the low-level
// component.
//
// threadpool.h defines the user-facing functions for use in
// operators. The main abstraction are parallel loops
// (ThreadPool::TryParallelFor*), although we also support scheduling
// of asynchronous tasks (ThreadPool::Schedule), and the construction
// of multi-loop parallel sections (ThreadPool::ParallelSection).
//
// This high level API is accessed via static methods on the
// ThreadPool class. These methods map the operations onto one of
// three low-level implementations: (#1) direct execution of the
// operations if there is no thread pool configured, (#2) execution of
// the operations using the modified Eigen threadpool, (#3) execution
// of the operations using OpenMP. Option #1 enables execution in
// simple settings without needing threads. Option #2 is the
// preferred approach for use in settings with parallelism.
//
// The high-level part of the thread pool is responsible for:
//
// - Exposing the desired degree of parallelism to user code, and to
// libraries such as MLAS. This lets the libraries tailor the
// extent to which they parallelize work.
//
// - Handling trivial cases (such as directly running parallel loops
// with only a single iteration, or with no iterations at all).
//
// - Deciding how to divide work efficiently between the threads
// available.
//
// The ThreadPool::TryParallelFor methods do this based on cost
// estimates supplied by the caller, and are designed to support
// loops with small amounts of work per iteration. The loop body is
// supplied as a function taking a [start,end) range of iterations
// to execute (avoiding the need for per-iteration std::function
// calls, or a reliance upon inlining to avoid those calls).
//
// ThreadPool::TrySimpleParallelFor uses a simpler single-iteration
// API based on the assumption that the caller has divided work to
// an appropriate granularity.
//
// - When used with the Eigen-based thread pool, the implementation of
// all of the loops maps down onto
// ThreadPool::ParallelForFixedBlockSizeScheduling. This method
// takes the degree of parallelism (d_of_p) and work distribution
// block size (from the cost-based heuristics), and creates a set of
// tasks in the underlying thread pool (via
// ThreadPool::RunInParallel).
//
// These tasks then run a loop which picks off batches of iterations
// from the user's code. The distribution of these batches is
// handled dynamically via LoopCounter::ClaimIterations. This
// dynamic balancing behavior helps make performance robust to any
// variability in the execution time across iterations, and to
// situations such as multiple loops running concurrently on the
// same thread pool.
//
// - When running a series of loops inside a parallel section, the
// LoopCounter also helps obtain affinity between these loops (i.e.,
// iteration X of one loop will tend to run on the same thread that
// ran iteration X of prior loops). This locality helps improve hit
// rates in per-core caches across the series of short loops used in
// operators like GRU.
//
// There are some known areas for exploration here:
//
// - The cost-based heuristics were developed prior to recent changes
// to the thread pool. The heuristics seem to work well, but we
// should revisit the tuning periodically.
//
// - Can we unify the APIs for the different kinds of parallel loop?
//
// In particular, we may be able to replace the current use of
// TryBatchParallelFor with appropriate costs for each call site,
// and then use TryParallelFor. This would allow for more dynamic
// re-balancing of work between threads than the current
// ThreadPool::PartitionWork function provides.
//
// - Given the extensive modifications to original Eigen code, should
// we separate that out as a new class and remove the dependence on
// other Eigen components.
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
} // namespace Eigen
namespace onnxruntime {
struct TensorOpCost {
double bytes_loaded;
double bytes_stored;
double compute_cycles;
};
namespace concurrency {
template <typename Environment>
class ThreadPoolTempl;
class ExtendedThreadPoolInterface;
class LoopCounter;
class ThreadPoolParallelSection;
class ThreadPool {
public:
#ifdef _WIN32
using NAME_CHAR_TYPE = wchar_t;
#else
using NAME_CHAR_TYPE = char;
#endif
// Constructs a pool for running with "degree_of_parallelism" threads with
// specified "name". env->StartThread() is used to create individual threads
// with the given ThreadOptions. If "low_latency_hint" is true the thread pool
// implementation may use it as a hint that lower latency is preferred at the
// cost of higher CPU usage, e.g. by letting one or more idle threads spin
// wait. Conversely, if the threadpool is used to schedule high-latency
// operations like I/O the hint should be set to false.
//
// REQUIRES: degree_of_parallelism > 0
ThreadPool(Env* env,
const ThreadOptions& thread_options,
const NAME_CHAR_TYPE* name,
int degree_of_parallelism,
bool low_latency_hint);
// Waits until all scheduled work has finished and then destroy the
// set of threads.
~ThreadPool();
// Start and end a multi-loop parallel section. Parallel loops can
// be executed directly (without using this API), but entering a
// parallel section allows the runtime system to amortize loop
// entry/exit costs over multiple loops, and allows it to promote
// affinity between corresponding iterations of different loops.
//
// Multi-loop sections would typically be used in cases where a
// series of loops executes without much code in between them, and
// where it is impractical to refactor code into a single loop. For
// instance:
//
// {
// onnxruntime::concurrency::ThreadPoool::ParallelSection ps(tp);
// for (int x = 0; x < seq_len; x++) {
// TrySimpleParallelFor(tp, 16, [&]() { ... });
// }
// }
//
// The parallel section is entered via the constructor of
// ThreadPool::ParallelSection, and exited via the destructor.
// Currently, thread-local state is used to track whether or not the
// current thread is inside a parallel section. In contrast to
// handling parallel section objects explicitly in user code, this
// approach allows code such as MLAS to operate with/without the use
// of parallel sections.
//
// Parallel sections are only implemented with the Eigen threadpool.
// They have no effect when using OpenMP.
//
// Parallel sections may not be nested, and may not be used inside
// parallel loops.
class ParallelSection {
public:
explicit ParallelSection(ThreadPool *tp);
~ParallelSection();
private:
friend class ThreadPool;
// Owning reference for the underlying ThreadPoolParallelSection
// which implements the thread management. We use an explicit
// deleter here so that the definition of
// ThreadPoolParallelSection does not need to be available at this
// point to avoid a dependence on the Eigen headers.
std::unique_ptr<ThreadPoolParallelSection, void(*)(ThreadPoolParallelSection*)>
ps_{nullptr, [](ThreadPoolParallelSection*){}};
#ifndef _OPENMP
ThreadPool *tp_;
#endif
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ParallelSection);
// Non-owning reference to the current thread's paralel section
// (or nullptr outside parallel sections).
static thread_local ParallelSection *current_parallel_section;
static_assert(std::is_trivially_destructible<decltype(current_parallel_section)>::value,
"Per-thread state should be trivially destructible");
};
// Schedules fn() for execution in the pool of threads. The function may run
// synchronously if it cannot be enqueued. This will occur if the thread pool's
// degree-of-parallelism is 1, but it may also occur for implementation-dependent
// reasons such as if queues used for buffering work are full.
static void Schedule(ThreadPool* tp,
std::function<void()> fn) {
if (tp) {
tp->Schedule(fn);
} else {
fn();
}
}
// ParallelFor shards the "total" units of work assuming each unit of work
// having roughly "cost_per_unit" cost, in cycles. Each unit of work is
// indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
// and the total cost of each shard is roughly the same.
//
// "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
// if not CPU-bound) to complete a unit of work. Overestimating creates too
// many shards and CPU time will be dominated by per-shard overhead, such as
// Context creation. Underestimating may not fully make use of the specified
// parallelism, and may also cause inefficiencies due to load balancing
// issues and stragglers.
static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
}
static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
// Directly schedule the 'total' tasks to the underlying threadpool, without
// cutting them by halves
inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total,
const std::function<void(std::ptrdiff_t)>& fn) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(tp);
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
#else
if (tp != nullptr) {
tp->SimpleParallelFor(total, fn);
} else {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
}
#endif
}
/**
* Tries to call the given function in parallel, with calls split into (num_batches) batches.
*\param num_batches If it is zero, it will be replaced to the value of DegreeOfParallelism().
*\param fn A std::function or STL style functor with signature of "void f(std::ptrdiff_t);"
* Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
*For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
*be just 1.
*
* ```
**/
template <typename F>
inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(tp);
ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
#else
if (tp == nullptr) {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
return;
}
if (total <= 0)
return;
if (total == 1) {
fn(0);
return;
}
if (num_batches <= 0) {
num_batches = std::min<std::ptrdiff_t>(total, DegreeOfParallelism(tp));
}
if (num_batches <= 1) {
for (int i = 0; i < total; i++) {
fn(i);
}
return;
}
tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
auto work = PartitionWork(batch_index, num_batches, total);
for (std::ptrdiff_t i = work.start; i < work.end; i++) {
fn(i);
}
});
#endif
}
struct WorkInfo {
std::ptrdiff_t start{0};
std::ptrdiff_t end{0};
};
/** Calculate the start and end offsets for a batch.
@remarks Based on MlasPartitionWork
*/
constexpr static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) {
const std::ptrdiff_t work_per_batch = total_work / num_batches;
const std::ptrdiff_t work_per_batch_extra = total_work % num_batches;
WorkInfo info;
if (batch_idx < work_per_batch_extra) {
info.start = (work_per_batch + 1) * batch_idx;
info.end = info.start + work_per_batch + 1;
} else {
info.start = work_per_batch * batch_idx + work_per_batch_extra;
info.end = info.start + work_per_batch;
}
return info;
}
//......................................................................
//
// The following static methods take into account whether OpenMP is
// enabled/disabled, and if the thread pool pointer is nullptr
// during sequential execution.
// Provide a hint to the caller for whether or not to parallelize
// work. This lets a caller switch to a sequential version of an
// algorithm rather than using calls via the ParallelFor functions.
static bool ShouldParallelize(const ThreadPool* tp);
// Return the degree of parallelism that code should assume when using the thread pool.
// It decouples the degree of parallelism for use with the thread pool from
// the implementation choice of whether this matches the number of threads created in
// the pool.
//
// Currently, a loop with degree-of-parallelism N is supported by a pool of N-1 threads
// working in combination with the thread initiating the loop.
static int DegreeOfParallelism(const ThreadPool* tp);
ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);
// StartProfiling and StopProfiling are not to be consumed as public-facing API
static void StartProfiling(concurrency::ThreadPool* tp);
static std::string StopProfiling(concurrency::ThreadPool* tp);
private:
friend class LoopCounter;
// Returns the number of threads created in the pool. This may be different from the
// value returned by DegreeOfParallelism to code using the pool.
int NumThreads() const;
// Returns current thread id between 0 and NumThreads() - 1, if called from a
// thread in the pool. Returns -1 otherwise.
int CurrentThreadId() const;
// Run fn with up to n degree-of-parallelism enlisting the thread pool for
// help. The degree-of-parallelism includes the caller, and so if n==1
// then the function will run directly in the caller. The fork-join
// synchronization is handled in the thread pool, and so any state captured
// by fn() is safe from concurrent access once RunWithHelp returns.
void RunInParallel(std::function<void(unsigned idx)> fn, unsigned n, std::ptrdiff_t block_size);
// Divides the work represented by the range [0, total) into k shards.
// Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
// Each shard may be executed on a different thread in parallel, depending on
// the number of threads available in the pool.
// When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
// Requires 0 < block_size <= total.
void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
// Return whether or not the calling thread should run a loop of
// num_iterations divided in chunks of block_size in parallel. If not,
// the caller should run the loop sequentially.
bool ShouldParallelizeLoop(const std::ptrdiff_t num_iterations,
const std::ptrdiff_t block_size = 1) const;
// Internal (non-static) parallel loop methods. Unlike the public static methods,
// these will not handle the cases of OpenMP builds. or builds without a threadpool.
void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t)>& fn);
void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn);
void Schedule(std::function<void()> fn);
void StartProfiling();
std::string StopProfiling();
ThreadOptions thread_options_;
// If a thread pool is created with degree_of_parallelism != 1 then an underlying
// EigenThreadPool is used to create OS threads and handle work distribution to them.
// If degree_of_parallelism == 1 then underlying_threadpool_ is left as nullptr
// and parallel work is run directly by the caller.
ExtendedThreadPoolInterface* underlying_threadpool_ = nullptr;
// If used, underlying_threadpool_ is instantiated and owned by the ThreadPool.
std::unique_ptr<ThreadPoolTempl<Env> > extended_eigen_threadpool_;
};
} // namespace concurrency
} // namespace onnxruntime
|
picolrn.c | /*
* This code is released under the MIT License.
* Copyright (c) 2013 Nenad Markus
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>
#include <omp.h>
// hyperparameters
#define NRANDS 128
/*
auxiliary stuff
*/
#define MAX(a, b) ((a)>(b)?(a):(b))
#define MIN(a, b) ((a)<(b)?(a):(b))
#define SQR(x) ((x)*(x))
/*
 * Portable monotonic timer: returns elapsed seconds as a float, or
 * -1.0f if the underlying clock query fails. The epoch is unspecified,
 * so the value is only meaningful for measuring intervals.
 */
#ifdef __GNUC__
#include <time.h>
float getticks()
{
    struct timespec ts;

    if(clock_gettime(CLOCK_MONOTONIC, &ts) < 0)
        return -1.0f;

    return ts.tv_sec + 1e-9f*ts.tv_nsec;
}
#else
#include <windows.h>
float getticks()
{
    /* the performance-counter frequency is constant; query it only once */
    static double freq = -1.0;
    LARGE_INTEGER lint;

    if(freq < 0.0)
    {
        if(!QueryPerformanceFrequency(&lint))
            return -1.0f;

        freq = lint.QuadPart;
    }

    if(!QueryPerformanceCounter(&lint))
        return -1.0f;

    return (float)( lint.QuadPart/freq );
}
#endif
/*
 * One step of a multiply-with-carry PRNG. `state` packs two 32-bit
 * sub-generators; both are advanced and their outputs are combined
 * into a single 32-bit value. Reentrant: all state lives in *state.
 */
uint32_t mwcrand_r(uint64_t* state)
{
    uint32_t* halves = (uint32_t*)state;

    /* repair degenerate (stuck) all-zero sub-states */
    if(halves[0] == 0)
        halves[0] = 0xAAAA;
    if(halves[1] == 0)
        halves[1] = 0xBBBB;

    /* advance each 16-bit multiply-with-carry generator:
       low 16 bits are the value, high 16 bits are the carry */
    halves[0] = 36969 * (halves[0] & 65535) + (halves[0] >> 16);
    halves[1] = 18000 * (halves[1] & 65535) + (halves[1] >> 16);

    /* combine the two streams into one 32-bit output */
    return (halves[0] << 16) + halves[1];
}
// global PRNG state used by the convenience wrappers below (not thread-safe)
uint64_t prngglobal = 0x12345678000fffffLL;

// Reseeds the global PRNG. A zero seed yields a zero state, which
// mwcrand_r repairs internally on the first draw.
void smwcrand(uint32_t seed)
{
    prngglobal = 0x12345678000fffffLL*seed;
}

// Draws one 32-bit value from the global PRNG.
uint32_t mwcrand()
{
    return mwcrand_r(&prngglobal);
}
/*
 * Global training-set storage, filled by load_training_data().
 */
#define MAX_N 4000000

int nimages = 0;
uint8_t* ppixels[MAX_N];     // per-image pixel buffers (8-bit grey, row-major)
int pdims[MAX_N][2];         // (nrows, ncols)
int contents[MAX_N][2];      // (i, j) where i is the start index and j-1 is the ending index into objects[]
int nobjects = 0;
int objects[MAX_N][4];       // (r, c, s, i): centre row/col, size, owning image index
/*
 * Loads an 8-bit grey image saved in the <RID> file format:
 *   - a 32-bit signed integer h (image height)
 *   - a 32-bit signed integer w (image width)
 *   - an array of w*h unsigned bytes representing pixel intensities
 * Returns 1 on success, 0 on failure (EOF, corrupt header, allocation
 * failure or short read). On success the caller owns *pixels and must
 * free() it; on failure *pixels is not leaked.
 */
int load_image(uint8_t* pixels[], int* nrows, int* ncols, FILE* file)
{
    size_t npixels;

    if(fread(nrows, sizeof(int), 1, file) != 1)
        return 0;
    if(fread(ncols, sizeof(int), 1, file) != 1)
        return 0;

    /* reject corrupt headers before computing the allocation size
       (the original multiplied two unchecked ints) */
    if(*nrows <= 0 || *ncols <= 0)
        return 0;

    npixels = (size_t)*nrows * (size_t)*ncols;
    *pixels = (uint8_t*)malloc(npixels*sizeof(uint8_t));
    if(!*pixels)
        return 0;

    /* read pixels; do not leak the buffer on a short read
       (the original returned 0 without freeing it) */
    if(fread(*pixels, sizeof(uint8_t), npixels, file) != npixels)
    {
        free(*pixels);
        *pixels = 0;
        return 0;
    }

    return 1;
}
/*
 * Loads the packed training set from `path` into the global arrays
 * (ppixels/pdims/contents/objects). The file is a sequence of records:
 * one <RID> image, then an int32 object count n, then n int32 triplets
 * (r, c, s). Reading stops at the first image that fails to load
 * (normally EOF). Returns 1 on success (including EOF mid-stream) and
 * 0 only when the file cannot be opened.
 * NOTE(review): the per-object fread() calls are unchecked and `file`
 * is never fclose()d here — both look like oversights; confirm before
 * reusing this in a long-running process.
 */
int load_training_data(char* path)
{
    FILE* file;

    file = fopen(path, "rb");
    if(!file)
        return 0;

    nimages = 0;
    nobjects = 0;
    while( load_image(&ppixels[nimages], &pdims[nimages][0], &pdims[nimages][1], file) )
    {
        int i, n;

        contents[nimages][0] = nobjects;
        // a missing object count is treated as end of stream
        if(fread(&n, sizeof(int), 1, file) != 1)
            return 1;
        for(i=0; i<n; ++i)
        {
            fread(&objects[nobjects][0], sizeof(int), 1, file); // r
            fread(&objects[nobjects][1], sizeof(int), 1, file); // c
            fread(&objects[nobjects][2], sizeof(int), 1, file); // s
            objects[nobjects][3] = nimages; // i
            ++nobjects;
        }
        contents[nimages][1] = contents[nimages][0] + n;

        ++nimages;
    }

    return 1;
}
/* Releases every loaded image buffer and resets the global image count. */
void delete_training_data()
{
    int k;

    for(k=0; k<nimages; ++k)
    {
        free(ppixels[k]);
        ppixels[k] = 0;    /* defend against a later double free */
    }

    nimages = 0;
}
/*
 * regression trees
 */

/*
 * Evaluates one pixel-pair binary test on image `iind` at centre (r, c)
 * with scale s. The four signed bytes packed into `tcode` are probe
 * offsets in 1/256 units of s; both probes are clamped to the image.
 * Returns 1 when the first probe is not brighter than the second.
 */
int bintest(int32_t tcode, int r, int c, int s, int iind)
{
    int r1, c1, r2, c2;
    int8_t* p = (int8_t*)&tcode;    // reinterpret the 4 packed test bytes

    // fixed-point probe coordinates (offsets scaled by s/256)
    r1 = (256*r + p[0]*s)/256;
    c1 = (256*c + p[1]*s)/256;
    r2 = (256*r + p[2]*s)/256;
    c2 = (256*c + p[3]*s)/256;

    // clamp probes into the image bounds
    r1 = MIN(MAX(0, r1), pdims[iind][0]-1);
    c1 = MIN(MAX(0, c1), pdims[iind][1]-1);
    r2 = MIN(MAX(0, r2), pdims[iind][0]-1);
    c2 = MIN(MAX(0, c2), pdims[iind][1]-1);

    return ppixels[iind][r1*pdims[iind][1]+c1]<=ppixels[iind][r2*pdims[iind][1]+c2];
}
/*
 * Weighted mean-squared-error impurity of splitting the samples
 * inds[0..indsnum) with binary test `tcode`: each sample goes to side
 * 0 or 1 according to bintest(), and the weighted variance of the
 * targets on each side is accumulated. Returns (wmse0 + wmse1)/wsum.
 * A side with zero total weight contributes zero error; the original
 * divided by zero there and produced NaN, which silently excluded
 * such candidate splits from selection.
 */
float get_split_error(int32_t tcode, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int indsnum)
{
    int i;
    double wsum, wsum0, wsum1;
    double wtvalsum0, wtvalsumsqr0, wtvalsum1, wtvalsumsqr1;
    double wmse0, wmse1;

    wsum = wsum0 = wsum1 = wtvalsum0 = wtvalsum1 = wtvalsumsqr0 = wtvalsumsqr1 = 0.0;

    for(i=0; i<indsnum; ++i)
    {
        const int idx = inds[i];

        if( bintest(tcode, rs[idx], cs[idx], ss[idx], iinds[idx]) )
        {
            wsum1 += ws[idx];
            wtvalsum1 += ws[idx]*tvals[idx];
            wtvalsumsqr1 += ws[idx]*SQR(tvals[idx]);
        }
        else
        {
            wsum0 += ws[idx];
            wtvalsum0 += ws[idx]*tvals[idx];
            wtvalsumsqr0 += ws[idx]*SQR(tvals[idx]);
        }

        wsum += ws[idx];
    }

    if(wsum == 0.0)
        return 0.0f;

    /* E[w*t^2] - (E[w*t])^2/W per side; an empty side has no error */
    wmse0 = (wsum0 > 0.0) ? wtvalsumsqr0 - SQR(wtvalsum0)/wsum0 : 0.0;
    wmse1 = (wsum1 > 0.0) ? wtvalsumsqr1 - SQR(wtvalsum1)/wsum1 : 0.0;

    return (float)( (wmse0 + wmse1)/wsum );
}
/*
 * Partitions inds[0..ninds) in place so that samples for which the
 * binary test fails come first, followed by the samples for which it
 * passes. Returns the size of the "failed" partition. tvals/ws are
 * unused here; they are kept for signature symmetry with the other
 * tree-growing helpers.
 */
int split_training_data(int32_t tcode, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int ninds)
{
    int stop;
    int i, j;
    int n0;

    stop = 0;
    i = 0;
    j = ninds - 1;

    while(!stop)
    {
        /* advance i to the first sample that passes the test */
        while( !bintest(tcode, rs[inds[i]], cs[inds[i]], ss[inds[i]], iinds[inds[i]]) )
        {
            if( i==j )
                break;
            else
                ++i;
        }
        /* retreat j to the last sample that fails the test */
        while( bintest(tcode, rs[inds[j]], cs[inds[j]], ss[inds[j]], iinds[inds[j]]) )
        {
            if( i==j )
                break;
            else
                --j;
        }

        if( i==j )
            stop = 1;
        else
        {
            /* plain temporary swap; the original used the XOR-swap
               trick, which is harder to read and zeroes the element
               if it is ever reached with i == j */
            int tmp = inds[i];
            inds[i] = inds[j];
            inds[j] = tmp;
        }
    }

    /* count the size of the "failed" partition */
    n0 = 0;
    for(i=0; i<ninds; ++i)
        if( !bintest(tcode, rs[inds[i]], cs[inds[i]], ss[inds[i]], iinds[inds[i]]) )
            ++n0;

    return n0;
}
/*
 * Generates a random pixel-pair test code: each of the four packed
 * bytes is drawn uniformly from the bounding box
 * bbox = {r_min, r_max, c_min, c_max} (in 1/256 units of scale).
 */
int32_t get_random_tcode(int8_t* bbox)
{
    int32_t tcode;
    int8_t* p;

    p = (int8_t*)&tcode;

    p[0] = bbox[0] + mwcrand()%(bbox[1]-bbox[0]+1);    // row offset, probe 1
    p[1] = bbox[2] + mwcrand()%(bbox[3]-bbox[2]+1);    // col offset, probe 1
    p[2] = bbox[0] + mwcrand()%(bbox[1]-bbox[0]+1);    // row offset, probe 2
    p[3] = bbox[2] + mwcrand()%(bbox[3]-bbox[2]+1);    // col offset, probe 2

    return tcode;
}
/*
 * Recursively grows the subtree rooted at node `nodeidx` (depth d of
 * maxd) over the samples inds[0..ninds):
 *  - at a leaf (d == maxd) it stores the weighted mean target in lut;
 *  - with <= 1 sample it emits a degenerate test (tcode 0) and recurses
 *    to fill the remaining lut entries;
 *  - otherwise it samples NRANDS candidate tests, scores them in
 *    parallel (OpenMP), keeps the one with the lowest weighted split
 *    error, partitions inds and recurses on both halves.
 * Always returns 1.
 */
int grow_subtree(int32_t tcodes[], float lut[], int nodeidx, int d, int maxd, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int ninds, int8_t* bbox)
{
    int i, nrands;

    int32_t tmptcodes[2048];
    float es[2048], e;

    int n0;

    if(d == maxd)
    {
        int lutidx;
        double tvalaccum, wsum;

        // leaf offset within the lut (nodes above the leaves: (1<<maxd)-1)
        lutidx = nodeidx - ((1<<maxd)-1);

        // compute output: a simple (weighted) average
        tvalaccum = 0.0;
        wsum = 0.0;

        for(i=0; i<ninds; ++i)
        {
            tvalaccum += ws[inds[i]]*tvals[inds[i]];
            wsum += ws[inds[i]];
        }

        if(wsum == 0.0)
            lut[lutidx] = 0.0f;
        else
            lut[lutidx] = (float)( tvalaccum/wsum );

        return 1;
    }
    else if(ninds <= 1)
    {
        // nothing left to split: emit a constant test and fill the leaves below
        tcodes[nodeidx] = 0;

        grow_subtree(tcodes, lut, 2*nodeidx+1, d+1, maxd, tvals, rs, cs, ss, iinds, ws, inds, ninds, bbox);
        grow_subtree(tcodes, lut, 2*nodeidx+2, d+1, maxd, tvals, rs, cs, ss, iinds, ws, inds, ninds, bbox);

        return 1;
    }

    // generate binary test codes
    nrands = NRANDS;

    for(i=0; i<nrands; ++i)
        tmptcodes[i] = get_random_tcode(bbox);

    // score all candidates in parallel (es[i] writes are independent)
    #pragma omp parallel for
    for(i=0; i<nrands; ++i)
        es[i] = get_split_error(tmptcodes[i], tvals, rs, cs, ss, iinds, ws, inds, ninds);

    // pick the candidate with the lowest split error
    e = es[0];
    tcodes[nodeidx] = tmptcodes[0];

    for(i=1; i<nrands; ++i)
        if(e > es[i])
        {
            e = es[i];
            tcodes[nodeidx] = tmptcodes[i];
        }

    // partition the samples by the chosen test and recurse
    n0 = split_training_data(tcodes[nodeidx], tvals, rs, cs, ss, iinds, ws, inds, ninds);

    grow_subtree(tcodes, lut, 2*nodeidx+1, d+1, maxd, tvals, rs, cs, ss, iinds, ws, &inds[0], n0, bbox);
    grow_subtree(tcodes, lut, 2*nodeidx+2, d+1, maxd, tvals, rs, cs, ss, iinds, ws, &inds[n0], ninds-n0, bbox);

    return 1;
}
/*
 * Grows a complete regression tree of depth d over n samples.
 * Returns 1 on success, 0 on failure (allocation or subtree growth).
 */
int grow_rtree(int32_t tcodes[], float lut[], int d, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int n, int8_t* bbox)
{
    int i, ok;
    int* inds;

    inds = (int*)malloc(n*sizeof(int));
    if(!inds)
        return 0;    /* the original dereferenced a NULL pointer here on OOM */

    for(i=0; i<n; ++i)
        inds[i] = i;

    ok = grow_subtree(tcodes, lut, 0, 0, d, tvals, rs, cs, ss, iinds, ws, inds, n, bbox);

    /* single cleanup path instead of duplicated free() branches */
    free(inds);

    return ok ? 1 : 0;
}
/*
 * Global cascade model, shared by load/save/train/classify.
 */
int32_t version = 3;

int tdepth;                  // depth of every tree (all trees share it)
int ntrees=0;
int8_t bbox[4];              // (r_min, r_max, c_min, c_max)
int32_t tcodes[4096][1024];  // per tree: (1<<tdepth)-1 internal-node tests
float luts[4096][1024];      // per tree: 1<<tdepth leaf outputs
float thresholds[4096];      // per tree: stage rejection threshold
/*
 * Deserializes the global cascade from `file`.
 * Returns 1 on success, 0 on a short read or on header values that
 * would overflow the static tables (the original ignored fread()
 * results and read tcodes/luts without bounds checks, so a corrupt
 * file could overflow the 4096x1024 arrays).
 */
int load_cascade_from_file(FILE* file)
{
    int i;

    if(fread(&version, sizeof(int32_t), 1, file) != 1)
        return 0;
    if(fread(&bbox[0], sizeof(int8_t), 4, file) != 4)
        return 0;
    if(fread(&tdepth, sizeof(int), 1, file) != 1)
        return 0;
    if(fread(&ntrees, sizeof(int), 1, file) != 1)
        return 0;

    /* tcodes/luts rows hold 1024 entries, so 1<<tdepth must be <= 1024;
       thresholds/tcodes/luts have 4096 rows */
    if(tdepth < 1 || tdepth > 10 || ntrees < 0 || ntrees > 4096)
        return 0;

    for(i=0; i<ntrees; ++i)
    {
        if(fread(&tcodes[i][0], sizeof(int32_t), (1<<tdepth)-1, file) != (size_t)((1<<tdepth)-1))
            return 0;
        if(fread(&luts[i][0], sizeof(float), 1<<tdepth, file) != (size_t)(1<<tdepth))
            return 0;
        if(fread(&thresholds[i], sizeof(float), 1, file) != 1)
            return 0;
    }

    return 1;
}
/*
 * Serializes the global cascade to `file`.
 * Returns 1 on success, 0 on a short write. Callers already test the
 * return value (e.g. `!save_cascade_to_file(file)`), but the original
 * ignored every fwrite() result and always reported success.
 */
int save_cascade_to_file(FILE* file)
{
    int i;

    if(fwrite(&version, sizeof(int32_t), 1, file) != 1)
        return 0;
    if(fwrite(&bbox[0], sizeof(int8_t), 4, file) != 4)
        return 0;
    if(fwrite(&tdepth, sizeof(int), 1, file) != 1)
        return 0;
    if(fwrite(&ntrees, sizeof(int), 1, file) != 1)
        return 0;

    for(i=0; i<ntrees; ++i)
    {
        if(fwrite(&tcodes[i][0], sizeof(int32_t), (1<<tdepth)-1, file) != (size_t)((1<<tdepth)-1))
            return 0;
        if(fwrite(&luts[i][0], sizeof(float), 1<<tdepth, file) != (size_t)(1<<tdepth))
            return 0;
        if(fwrite(&thresholds[i], sizeof(float), 1, file) != 1)
            return 0;
    }

    return 1;
}
/*
 * Evaluates tree `i` at region (r, c, s) of image `iind`: walks from
 * the root using the packed binary tests and returns the leaf value.
 * Node indexing is 1-based, so after tdepth steps idx-(1<<tdepth) is
 * the leaf's offset into the lut.
 */
float get_tree_output(int i, int r, int c, int s, int iind)
{
    int idx, j;

    idx = 1;    // 1-based root
    for(j=0; j<tdepth; ++j)
        idx = 2*idx + bintest(tcodes[i][idx-1], r, c, s, iind);

    return luts[i][idx - (1<<tdepth)];
}
/*
 * Runs the full cascade on region (r, c, s) of image `iind`.
 * Accumulates tree outputs into *o and returns -1 as soon as the
 * running sum falls to or below a stage threshold (rejection), or 1
 * on acceptance (also when the cascade is empty).
 */
int classify_region(float* o, int r, int c, int s, int iind)
{
    int i;

    *o = 0.0f;

    if(!ntrees)
        return 1;

    for(i=0; i<ntrees; ++i)
    {
        *o += get_tree_output(i, r, c, s, iind);

        // early rejection: the running score must clear every stage threshold
        if(*o <= thresholds[i])
            return -1;
    }

    return 1;
}
/*
 * Boosts new regression trees onto the cascade until the stage false
 * positive rate drops below `maxfpr` or `maxntrees` trees were added.
 * tvals[i] > 0 marks the np positives, the rest are the nn negatives;
 * os[] carries the running cascade output per sample and is updated in
 * place. After each tree, the stage threshold is lowered from 5.0 in
 * 0.001 steps until the stage TPR reaches `mintpr`; the final threshold
 * is stored on the stage's last tree. Returns the achieved stage FPR.
 * NOTE(review): if the while loop body never executes (maxntrees == 0
 * or fpr already <= maxfpr), `threshold` is read uninitialized at the
 * end — confirm callers always request at least one tree.
 * NOTE(review): the malloc() result is not checked.
 */
float learn_new_stage(float mintpr, float maxfpr, int maxntrees, float tvals[], int rs[], int cs[], int ss[], int iinds[], float os[], int np, int nn)
{
    int i;
    double* ws;
    double wsum;
    float threshold, tpr, fpr;

    printf("* learning a new stage ...\n");

    ws = (double*)malloc((np+nn)*sizeof(double));

    maxntrees = ntrees + maxntrees;    // absolute tree-count target
    fpr = 1.0f;
    while(ntrees<maxntrees && fpr>maxfpr)
    {
        float t;
        int numtps, numfps;

        t = getticks();

        // compute weights (boosting-style: exponential in the sample margin) ...
        wsum = 0.0;
        for(i=0; i<np+nn; ++i)
        {
            if(tvals[i] > 0)
                ws[i] = exp(-1.0*os[i])/np;
            else
                ws[i] = exp(+1.0*os[i])/nn;

            wsum += ws[i];
        }
        for(i=0; i<np+nn; ++i)
            ws[i] /= wsum;

        // grow a tree ...
        grow_rtree(tcodes[ntrees], luts[ntrees], tdepth, tvals, rs, cs, ss, iinds, ws, np+nn, bbox);
        thresholds[ntrees] = -1337.0f;    // sentinel: intermediate trees never reject
        ++ntrees;

        // update outputs ...
        for(i=0; i<np+nn; ++i)
        {
            float o;

            o = get_tree_output(ntrees-1, rs[i], cs[i], ss[i], iinds[i]);

            os[i] += o;
        }

        // get threshold: lower it until the stage TPR requirement is met ...
        threshold = 5.0f;
        do
        {
            threshold -= 0.001f;
            numtps = 0;
            numfps = 0;

            for(i=0; i<np+nn; ++i)
            {
                if( tvals[i]>0 && os[i]>threshold)
                    ++numtps;
                if( tvals[i]<0 && os[i]>threshold)
                    ++numfps;
            }

            tpr = numtps/(float)np;
            fpr = numfps/(float)nn;
        }
        while(tpr<mintpr);

        printf(" ** tree %d (%d [s]) ... stage tpr=%f, stage fpr=%f\n", ntrees, (int)(getticks()-t), tpr, fpr);
        fflush(stdout);
    }

    // only the stage's last tree carries the real rejection threshold
    thresholds[ntrees-1] = threshold;
    printf(" ** threshold set to %f\n", threshold);

    free(ws);

    return fpr;
}
/*
 * Scans image `iind` with a sliding window over a geometric scale
 * pyramid: s grows from `minsize` to `maxsize` by `scalefactor`, and
 * the stride at each scale is stridefactor*s (at least 1 pixel).
 * Regions accepted by the cascade are appended to rcsq as
 * (row, col, size, score) quadruplets, up to `maxndetections`.
 * Returns the number of detections stored.
 */
int find_objects
(
    float rcsq[], int maxndetections,
    int iind,
    float scalefactor, float stridefactor, float minsize, float maxsize
)
{
    float s;
    int ndetections, nrows, ncols;

    nrows = pdims[iind][0];
    ncols = pdims[iind][1];

    ndetections = 0;
    s = minsize;

    while(s<=maxsize)
    {
        float r, c, dr, dc;

        // scale-proportional stride, clamped to one pixel
        dr = dc = MAX(stridefactor*s, 1.0f);

        for(r=s/2+1; r<=nrows-s/2-1; r+=dr)
            for(c=s/2+1; c<=ncols-s/2-1; c+=dc)
            {
                float q;
                int t;

                // r, c, s are truncated to ints by classify_region's prototype
                t = classify_region(&q, r, c, s, iind);

                if(1==t)
                {
                    if(ndetections < maxndetections)
                    {
                        rcsq[4*ndetections+0] = r;
                        rcsq[4*ndetections+1] = c;
                        rcsq[4*ndetections+2] = s;
                        rcsq[4*ndetections+3] = q;

                        ++ndetections;
                    }
                }
            }

        s = scalefactor*s;
    }

    return ndetections;
}
/*
 * Overlap measure for two axis-aligned squares given by centre (r, c)
 * and side length s: intersection area divided by union area.
 * Returns a value in [0, 1]; 0 when the squares do not intersect.
 */
float get_overlap(float r1, float c1, float s1, float r2, float c2, float s2)
{
    float inter_r, inter_c, inter_area;

    /* extent of the intersection along each axis, clamped at zero */
    inter_r = MAX(0, MIN(r1+s1/2, r2+s2/2) - MAX(r1-s1/2, r2-s2/2));
    inter_c = MAX(0, MIN(c1+s1/2, c2+s2/2) - MAX(c1-s1/2, c2-s2/2));

    inter_area = inter_r*inter_c;

    /* union area = area1 + area2 - intersection */
    return inter_area/(s1*s1 + s2*s2 - inter_area);
}
/*
 * Mines training samples by running the current cascade over every
 * loaded image (images processed in parallel with OpenMP). Detections
 * overlapping a ground-truth object by > 0.6 become positives;
 * detections not assigned to any object are subsampled with
 * probability ~subsf as false positives. Samples are appended to the
 * output arrays; *np and *nn receive the positive/negative counts.
 * NOTE(review): the capacity guards (*np, *nn, n limits) are evaluated
 * OUTSIDE the omp critical sections that update the counters, so they
 * can race and overshoot slightly — confirm whether that is an
 * accepted trade-off here.
 */
void search_for_training_data(float tvals[], int rs[], int cs[], int ss[], int iinds[], float os[], int* np, int* nn, float subsf)
{
    int i, n = 0;

    #define NUMPRNGS 1024
    static int prngsinitialized = 0;
    static uint64_t prngs[NUMPRNGS];

    if(!prngsinitialized)
    {
        // initialize a PRNG for each thread
        for(i=0; i<NUMPRNGS; ++i)
            prngs[i] = 0xFFFF*mwcrand() + 0xFFFF1234FFFF0001LL*mwcrand();

        prngsinitialized = 1;
    }

    *np = 0;
    *nn = 0;

    #pragma omp parallel for
    for(i=0; i<nimages; ++i)
    {
        int thid = omp_get_thread_num();
        #define MAXNDETS 8192
        float dets[4*MAXNDETS];
        int ndets = find_objects(dets, MAXNDETS, i, 1.1f, 0.1f, 24, 1000);
        //printf("%d -> %d %d %d\n", i, ndets, *np, *nn); fflush(stdout);
        int j, k;
        for(j=0; j<ndets; ++j)
        {
            int assigned = 0;
            for(k=contents[i][0]; k<contents[i][1]; ++k)
            {
                float overlap = get_overlap(dets[4*j+0], dets[4*j+1], dets[4*j+2], objects[k][0], objects[k][1], objects[k][2]);
                // strong overlap with ground truth: record a true positive
                if(overlap > 0.6f && *np<MAX_N/2 && n<MAX_N)
                    #pragma omp critical
                {
                    // true positive
                    rs[n] = dets[4*j+0];
                    cs[n] = dets[4*j+1];
                    ss[n] = dets[4*j+2];
                    os[n] = dets[4*j+3];
                    iinds[n] = i;
                    tvals[n] = +1;

                    ++n;
                    ++*np;
                }
                // moderate overlap: do not count the detection as a false positive
                if(overlap > 0.4f)
                    assigned = 1;
            }
            // subsample unassigned detections as hard negatives,
            // never letting negatives outnumber positives
            if(!assigned && (mwcrand_r(&prngs[thid])%1000)/999.0f<subsf && *nn<3*MAX_N/4 && *nn<*np && n<MAX_N)
                #pragma omp critical
            {
                // false positive
                rs[n] = dets[4*j+0];
                cs[n] = dets[4*j+1];
                ss[n] = dets[4*j+2];
                os[n] = dets[4*j+3];
                iinds[n] = i;
                tvals[n] = -1;

                ++n;
                ++*nn;
            }
        }
    }
}
/*
 * Builds a training set from the ground-truth pool: every annotated
 * object that the current cascade still accepts becomes a positive;
 * negatives are mined in parallel by classifying random windows until
 * *nn catches up with *np. Samples go to the output arrays; *np/*nn
 * receive the counts. Returns the estimated cascade FPR (*nn divided
 * by the number of random windows tried), or -1.0f when the object
 * pool exceeds MAX_N.
 * NOTE(review): inside the parallel region `stop` and `n` are written
 * under "omp critical" but read without synchronization in the loop
 * condition — a data race in strict OpenMP terms; confirm intent.
 * NOTE(review): NUMPRNGS is re-#defined here (already defined in
 * search_for_training_data), and `j` is unused.
 */
float sample_training_data(float tvals[], int rs[], int cs[], int ss[], int iinds[], float os[], int* np, int* nn)
{
    int i, j, n;
    int64_t nw;
    float etpr, efpr;
    int t;    // NOTE(review): getticks() returns float seconds; int truncates

    #define NUMPRNGS 1024
    static int prngsinitialized = 0;
    static uint64_t prngs[NUMPRNGS];

    int stop;

    t = getticks();

    n = 0;

    /*
        object samples
    */
    if(nobjects > MAX_N)
    {
        printf("* nobjects is too large ... aborting ...\n");
        return -1.0f;
    }

    // keep every annotated object the cascade still accepts as a positive
    for(i=0; i<nobjects; ++i)
    {
        int r, c, s, iind;

        iind = objects[i][3];
        r = objects[i][0];
        c = objects[i][1];
        s = objects[i][2];

        if( classify_region(&os[n], r, c, s, iind) == 1 )
        {
            rs[n] = r;
            cs[n] = c;
            ss[n] = s;
            iinds[n] = iind;

            tvals[n] = +1;

            ++n;
        }
    }

    *np = n;

    /*
        non-object samples
    */
    if(!prngsinitialized)
    {
        // initialize a PRNG for each thread
        for(i=0; i<NUMPRNGS; ++i)
            prngs[i] = 0xFFFF*mwcrand() + 0xFFFF1234FFFF0001LL*mwcrand();

        prngsinitialized = 1;
    }

    nw = 0;
    *nn = 0;
    stop = 0;

    #pragma omp parallel
    {
        int thid;

        thid = omp_get_thread_num();

        while(!stop)
        {
            /*
                data mine hard negatives
            */
            float o;
            int iind, s, r, c;

            // should we sample based on image size?
            iind = mwcrand_r(&prngs[thid])%nimages;
            //if (contents[iind][0] != contents[iind][1])
            //	continue;

            r = mwcrand_r(&prngs[thid])%pdims[iind][0];
            c = mwcrand_r(&prngs[thid])%pdims[iind][1];
            s = objects[mwcrand_r(&prngs[thid])%nobjects][2]; // sample the size of a random object in the pool

            if( classify_region(&o, r, c, s, iind) == 1 )
            {
                // check if the region intersects with a true positive
                // this could probably be done more effciently but we do not expect a large number of objects per image
                int i, ok = 1;

                for(i=contents[iind][0]; i<contents[iind][1]; ++i)
                    if(get_overlap(r, c, s, objects[i][0], objects[i][1], objects[i][2]) > 0.5f)
                        ok = 0;

                if(ok)
                    #pragma omp critical
                {
                    //we have a false positive ...
                    if(*nn<*np)
                    {
                        rs[n] = r;
                        cs[n] = c;
                        ss[n] = s;
                        iinds[n] = iind;
                        os[n] = o;

                        tvals[n] = -1;

                        ++n;
                        ++*nn;
                    }
                    else
                        stop = 1;
                }
            }

            // count every window tried (for the FPR estimate below)
            if(!stop)
            {
                #pragma omp atomic
                ++nw;
            }
        }
    }

    /*
        print the estimated true positive and false positive rates
    */
    etpr = *np/(float)(nobjects);
    efpr = (float)( *nn/(double)nw );

    printf("* sampling finished ...\n");
    printf(" ** elapsed time: %d\n", (int)(getticks()-t));
    printf(" ** cascade TPR=%.8f\n", etpr);
    printf(" ** cascade FPR=%.8f (%d/%lld)\n", efpr, *nn, (long long int)nw);

    /*
    */
    return efpr;
}
/*
 * Scratch sample buffers shared by the sampling and training routines,
 * sized for positives plus negatives.
 */
static int rs[2*MAX_N];       // sample centre rows
static int cs[2*MAX_N];       // sample centre cols
static int ss[2*MAX_N];       // sample sizes
static int iinds[2*MAX_N];    // sample image indices
static float tvals[2*MAX_N];  // targets: +1 positive, -1 negative
static float os[2*MAX_N];     // current cascade output per sample
/*
 * Trains a full cascade from scratch: resets the global model to an
 * empty cascade with bounding box `bb` and tree depth `_tdepth`, then
 * (1) adds stages on randomly sampled negatives until the estimated
 * FPR under random sampling drops below 1%, and (2) runs five rounds
 * of hard-negative mining with progressively larger subsampling, each
 * followed by a tightened stage (squared tpr/fpr targets). After every
 * mining round the cascade is checkpointed to `savepath` (if given).
 * Always returns 1.
 */
int learn_a_cascade(char* savepath, int8_t bb[], int _tdepth, float stagetpr, float stagefpr, int maxntreesperstage)
{
    int np, nn, i;
    FILE* f = 0;

    /* reset the global cascade state */
    bbox[0] = bb[0];
    bbox[1] = bb[1];
    bbox[2] = bb[2];
    bbox[3] = bb[3];

    ntrees = 0;
    tdepth = _tdepth;

    /* phase 1: train against randomly sampled negatives */
    while(1)
    {
        float efpr = sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn);
        if(efpr<0.01f)
            break;
        learn_new_stage(stagetpr, stagefpr, maxntreesperstage, tvals, rs, cs, ss, iinds, os, np, nn);
        printf("\n");
    }

    /* phase 2: hard-negative mining rounds */
    float subsf = 0.01f;
    for(i=0; i<5; ++i)
    {
        printf("* scanning in progress\n");
        search_for_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn, subsf);
        printf("* starting training with np=%d, nn=%d ...\n", np, nn);
        learn_new_stage(stagetpr*stagetpr, stagefpr*stagefpr, maxntreesperstage, tvals, rs, cs, ss, iinds, os, np, nn);

        /* checkpoint after every round */
        if(savepath)
        {
            f = fopen(savepath, "wb");
            if(f)    /* the original passed an unchecked fopen() result to save_cascade_to_file */
            {
                save_cascade_to_file(f);
                fclose(f);
            }
            else
                printf("* WARNING: cannot open '%s' for writing\n", savepath);
        }

        // just for estimating FPR for random sampling
        sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn);

        subsf *= 3;
    }

    printf("* learning process finished\n");
    return 1;
}
/*
*/
/* Returns a static usage string describing the command-line interface. */
const char* howto()
{
    static const char usage[] =
        "./picolrn <trdata> <cascade-write-path>\n";

    return usage;
}
#ifndef NO_PICOLRN_MAIN
int main(int argc, char* argv[])
{
FILE* file = 0;
// initialize the PRNG
smwcrand(time(0));
if(argc == 3)
{
/*
* training with default params
* args: <trdata> <output-cascade>
*/
if(!load_training_data(argv[1]))
{
printf("* cannot load training data ...\n");
return 0;
}
int8_t bb[] = {-127, +127, -127, +127};
learn_a_cascade(argv[2], bb, 6, 0.98f, 0.4f, 16);
file = fopen(argv[2], "wb");
if(!file || !save_cascade_to_file(file))
{
printf("* cannot save result to specified destination\n");
return 1;
}
fclose(file);
}
else if(argc == 6)
{
/*
* initializing a new cascade with 0 trees
* args: <bbox[0]> <bbox[1]> <bbox[2]> <bbox[3]> <tdepth>
*/
int tdepth;
sscanf(argv[1], "%hhd", &bbox[0]);
sscanf(argv[2], "%hhd", &bbox[1]);
sscanf(argv[3], "%hhd", &bbox[2]);
sscanf(argv[4], "%hhd", &bbox[3]);
sscanf(argv[5], "%d", &tdepth);
ntrees = 0;
save_cascade_to_file(stdout);
}
else if(argc == 7)
{
/*
* append a new stage to an existing cascade
* args: <current-cascade> <trdata> <tpr> <fpr> <ntrees> <new-cascade>
*/
float stagetpr, stagefpr;
int maxntreesforstage, np, nn;
file = fopen(argv[1], "rb");
if(!file || !load_cascade_from_file(file))
{
printf("* cannot load a cascade from '%s'\n", argv[1]);
return 1;
}
fclose(file);
if(!load_training_data(argv[2]))
{
printf("* cannot load the training data from '%s'\n", argv[2]);
return 1;
}
sscanf(argv[3], "%f", &stagetpr);
sscanf(argv[4], "%f", &stagefpr);
sscanf(argv[5], "%d", &maxntreesforstage);
float efpr = sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn);
if(efpr < 0.01f)
search_for_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn, 0.05f);
printf("* starting training with np=%d, nn=%d ...\n", np, nn);
learn_new_stage(stagetpr, stagefpr, maxntreesforstage, tvals, rs, cs, ss, iinds, os, np, nn);
file = fopen(argv[6], "wb");
if(!file || !save_cascade_to_file(file))
{
printf("* cannot save result to specified destination\n");
return 1;
}
fclose(file);
}
else
{
printf("%s", howto());
}
//
return 0;
}
#endif |
nsfactor.c | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <ctype.h>
#include <string.h>
#include "futil.h"
#include "gmx_random.h"
#include "smalloc.h"
#include "sysstuff.h"
#include "strdb.h"
#include "vec.h"
#include "nsfactor.h"
#include "gmx_omp.h"
/*
 * Validates the histogram bin width: it must be at least 0.1 nm
 * (~the H-H bond length, the smallest distance expected in a box).
 * Calls gmx_fatal() (does not return) on invalid values.
 */
void check_binwidth(real binwidth)
{
    real smallest_bin = 0.1;
    if (binwidth < smallest_bin)
    {
        /* message grammar fixed ("shouldnt ... then" -> "shouldn't ... than") */
        gmx_fatal(FARGS, "Binwidth shouldn't be smaller than the smallest bond length (H-H bond ~0.1nm) in a box");
    }
}
/*
 * Validates the Monte-Carlo coverage parameter: it must be -1
 * (disabled) or lie in (0, 1]. Calls gmx_fatal() (does not return)
 * on invalid values.
 */
void check_mcover(real mcover)
{
    if (mcover > 1.0)
    {
        gmx_fatal(FARGS, "mcover should be -1 or (0,1]");
    }
    /* was a bitwise '&' on comparison results; logical '&&' states the
       intent (and short-circuits) — the dead `else return` is dropped */
    else if ((mcover < 0) && (mcover != -1))
    {
        gmx_fatal(FARGS, "mcover should be -1 or (0,1]");
    }
}
/*
 * Normalizes the n values in a[] in place so that they sum to 1
 * (divides each element by the total).
 */
void normalize_probability(int n, double *a)
{
    int k;
    double total = 0.0;

    /* accumulate the sum first, then rescale every entry */
    for (k = 0; k < n; k++)
    {
        total += a[k];
    }
    for (k = 0; k < n; k++)
    {
        a[k] /= total;
    }
}
/*
 * Reads neutron scattering lengths from the data file `datfn`
 * (nsfactor.dat). Each valid line holds "atomname protons neutrons
 * slength"; malformed lines are reported on stderr and skipped.
 * Returns a newly allocated table with one entry per parsed line.
 */
gmx_neutron_atomic_structurefactors_t *gmx_neutronstructurefactors_init(const char *datfn)
{
    /* read nsfactor.dat */
    FILE *fp;
    char line[STRLEN];
    int nralloc = 10;
    int n, p;
    int i, line_no;
    char atomnm[8];
    double slength;
    gmx_neutron_atomic_structurefactors_t *gnsf;

    fp = libopen(datfn);
    line_no = 0;
    /* allocate memory for structure */
    /* NOTE(review): snew(gnsf, nralloc) allocates nralloc structs but
       only gnsf[0] is ever used — snew(gnsf, 1) looks intended; confirm. */
    snew(gnsf, nralloc);
    snew(gnsf->atomnm, nralloc);
    snew(gnsf->p, nralloc);
    snew(gnsf->n, nralloc);
    snew(gnsf->slength, nralloc);

    gnsf->nratoms = line_no;

    while (get_a_line(fp, line, STRLEN))
    {
        i = line_no;
        if (sscanf(line, "%s %d %d %lf", atomnm, &p, &n, &slength) == 4)
        {
            gnsf->atomnm[i] = strdup(atomnm);
            gnsf->n[i] = n;
            gnsf->p[i] = p;
            gnsf->slength[i] = slength;
            line_no++;
            gnsf->nratoms = line_no;
            /* grow the arrays when full (linear growth, one entry at a time) */
            if (line_no == nralloc)
            {
                nralloc++;
                srenew(gnsf->atomnm, nralloc);
                srenew(gnsf->p, nralloc);
                srenew(gnsf->n, nralloc);
                srenew(gnsf->slength, nralloc);
            }
        }
        else
        {
            fprintf(stderr, "WARNING: Error in file %s at line %d ignored\n",
                    datfn, line_no);
        }
    }
    /* shrink the arrays to the number of entries actually parsed */
    srenew(gnsf->atomnm, gnsf->nratoms);
    srenew(gnsf->p, gnsf->nratoms);
    srenew(gnsf->n, gnsf->nratoms);
    srenew(gnsf->slength, gnsf->nratoms);

    fclose(fp);

    return (gmx_neutron_atomic_structurefactors_t *) gnsf;
}
/* Build a gmx_sans_t for a topology: assign each atom a neutron
 * scattering length by matching its atomic number against the entries
 * read from nsfactor.dat.
 *
 * NOTE(review): hydrogen relies on an exact floating-point mass
 * comparison (m == 1.008000) and on H and D being entries 0 and 1 of
 * the database -- confirm nsfactor.dat ordering before changing.
 */
gmx_sans_t *gmx_sans_init (t_topology *top, gmx_neutron_atomic_structurefactors_t *gnsf)
{
    gmx_sans_t *gsans = NULL;
    int i, j;
    /* Try to assing scattering length from nsfactor.dat */
    snew(gsans, 1);
    snew(gsans->slength, top->atoms.nr);
    /* copy topology data */
    gsans->top = top;
    for (i = 0; i < top->atoms.nr; i++)
    {
        for (j = 0; j < gnsf->nratoms; j++)
        {
            if (top->atoms.atom[i].atomnumber == gnsf->p[j])
            {
                /* we need special case for H and D */
                if (top->atoms.atom[i].atomnumber == 1)
                {
                    if (top->atoms.atom[i].m == 1.008000)
                    {
                        /* mass matches protium exactly: entry 0 (H) */
                        gsans->slength[i] = gnsf->slength[0];
                    }
                    else
                    {
                        /* any other mass is treated as deuterium: entry 1 */
                        gsans->slength[i] = gnsf->slength[1];
                    }
                }
                else
                {
                    gsans->slength[i] = gnsf->slength[j];
                }
            }
        }
    }
    return (gmx_sans_t *) gsans;
}
/* Compute the scattering-length-weighted radial distribution histogram
 * p(r) over the isize atoms selected by index[].
 *
 * Two evaluation modes:
 *  - bMC: Monte-Carlo sampling of random atom pairs; mcover selects the
 *    fraction of all pairs to sample, -1 meaning 1%.
 *  - direct: all unique pairs (i, j < i).
 * Both modes have OpenMP variants that accumulate into per-thread
 * histograms (tgr) which are reduced into pr->gr afterwards, so the
 * inner loops need no atomic updates.
 *
 * Returns a newly allocated histogram; the caller owns it.
 */
gmx_radial_distribution_histogram_t *calc_radial_distribution_histogram (
        gmx_sans_t *gsans,
        rvec *x,
        matrix box,
        atom_id *index,
        int isize,
        double binwidth,
        gmx_bool bMC,
        gmx_bool bNORM,
        real mcover,
        unsigned int seed)
{
    gmx_radial_distribution_histogram_t *pr = NULL;
    rvec dist;
    double rmax;
    int i, j;
#ifdef GMX_OPENMP
    double **tgr;           /* per-thread histograms, reduced into pr->gr */
    int tid;
    int nthreads;
    gmx_rng_t *trng = NULL; /* per-thread RNGs seeded from the master rng */
#endif
    gmx_large_int_t mc = 0, max;
    gmx_rng_t rng = NULL;
    /* allocate memory for pr */
    snew(pr, 1);
    /* set some fields */
    pr->binwidth = binwidth;
    /*
     * create max dist rvec
     * dist = box[xx] + box[yy] + box[zz]
     */
    rvec_add(box[XX], box[YY], dist);
    rvec_add(box[ZZ], dist, dist);
    rmax = norm(dist);
    pr->grn = (int)floor(rmax/pr->binwidth)+1;
    rmax = pr->grn*pr->binwidth;
    snew(pr->gr, pr->grn);
    if (bMC)
    {
        /* Special case for setting automaticaly number of mc iterations to 1% of total number of direct iterations */
        if (mcover == -1)
        {
            max = (gmx_large_int_t)floor(0.5*0.01*isize*(isize-1));
        }
        else
        {
            max = (gmx_large_int_t)floor(0.5*mcover*isize*(isize-1));
        }
        rng = gmx_rng_init(seed);
#ifdef GMX_OPENMP
        nthreads = gmx_omp_get_max_threads();
        snew(tgr, nthreads);
        snew(trng, nthreads);
        for (i = 0; i < nthreads; i++)
        {
            snew(tgr[i], pr->grn);
            trng[i] = gmx_rng_init(gmx_rng_uniform_uint32(rng));
        }
        /* mc is listed shared, but as the iteration variable of the
         * "omp for" below it is implicitly privatized per the OpenMP
         * specification */
#pragma omp parallel shared(tgr,trng,mc) private(tid,i,j)
        {
            tid = gmx_omp_get_thread_num();
            /* now starting parallel threads */
#pragma omp for
            for (mc = 0; mc < max; mc++)
            {
                i = (int)floor(gmx_rng_uniform_real(trng[tid])*isize);
                j = (int)floor(gmx_rng_uniform_real(trng[tid])*isize);
                if (i != j)
                {
                    tgr[tid][(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
                }
            }
        }
        /* collecting data from threads */
        for (i = 0; i < pr->grn; i++)
        {
            for (j = 0; j < nthreads; j++)
            {
                pr->gr[i] += tgr[j][i];
            }
        }
        /* freeing memory for tgr and destroying trng */
        for (i = 0; i < nthreads; i++)
        {
            sfree(tgr[i]);
            gmx_rng_destroy(trng[i]);
        }
        sfree(tgr);
        sfree(trng);
#else
        for (mc = 0; mc < max; mc++)
        {
            i = (int)floor(gmx_rng_uniform_real(rng)*isize);
            j = (int)floor(gmx_rng_uniform_real(rng)*isize);
            if (i != j)
            {
                pr->gr[(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
            }
        }
#endif
        gmx_rng_destroy(rng);
    }
    else
    {
#ifdef GMX_OPENMP
        nthreads = gmx_omp_get_max_threads();
        /* Allocating memory for tgr arrays */
        snew(tgr, nthreads);
        for (i = 0; i < nthreads; i++)
        {
            snew(tgr[i], pr->grn);
        }
#pragma omp parallel shared(tgr) private(tid,i,j)
        {
            tid = gmx_omp_get_thread_num();
            /* starting parallel threads */
#pragma omp for
            for (i = 0; i < isize; i++)
            {
                for (j = 0; j < i; j++)
                {
                    tgr[tid][(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
                }
            }
        }
        /* collecating data for pr->gr */
        for (i = 0; i < pr->grn; i++)
        {
            for (j = 0; j < nthreads; j++)
            {
                pr->gr[i] += tgr[j][i];
            }
        }
        /* freeing memory for tgr */
        for (i = 0; i < nthreads; i++)
        {
            sfree(tgr[i]);
        }
        sfree(tgr);
#else
        for (i = 0; i < isize; i++)
        {
            for (j = 0; j < i; j++)
            {
                pr->gr[(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
            }
        }
#endif
    }
    /* normalize if needed */
    if (bNORM)
    {
        normalize_probability(pr->grn, pr->gr);
    }
    snew(pr->r, pr->grn);
    for (i = 0; i < pr->grn; i++)
    {
        /* r values are the bin centers */
        pr->r[i] = (pr->binwidth*i+pr->binwidth*0.5);
    }
    return (gmx_radial_distribution_histogram_t *) pr;
}
/* Convert a radial distribution histogram p(r) into a scattering
 * intensity curve S(q) over [start_q, end_q) with step q_step:
 *     S(q) = (1/q) * sum_j p(r_j)/r_j * sin(q * r_j)
 * The q = 0 point is set to 1.0 directly (the analytic limit), which
 * also avoids dividing by q = 0.
 *
 * Improvement: the original duplicated the whole summation loop in the
 * start_q == 0.0 and start_q != 0.0 branches; the two branches now only
 * choose the starting index.  Caller owns the returned structure.
 */
gmx_static_structurefactor_t *convert_histogram_to_intensity_curve (gmx_radial_distribution_histogram_t *pr, double start_q, double end_q, double q_step)
{
    gmx_static_structurefactor_t *sq = NULL;
    int i, j;
    /* init data */
    snew(sq, 1);
    sq->qn = (int)floor((end_q-start_q)/q_step);
    snew(sq->q, sq->qn);
    snew(sq->s, sq->qn);
    for (i = 0; i < sq->qn; i++)
    {
        sq->q[i] = start_q+i*q_step;
    }
    i = 0;
    if (start_q == 0.0)
    {
        /* analytic q -> 0 limit; skip point 0 in the loop below */
        sq->s[0] = 1.0;
        i = 1;
    }
    for (; i < sq->qn; i++)
    {
        for (j = 0; j < pr->grn; j++)
        {
            sq->s[i] += (pr->gr[j]/pr->r[j])*sin(sq->q[i]*pr->r[j]);
        }
        sq->s[i] /= sq->q[i];
    }
    return (gmx_static_structurefactor_t *) sq;
}
|
nested_lwt_thread_num.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
__attribute__ ((noinline)) // workaround for bug in icc
/* Query the OMPT task at the given ancestor level and print its
 * parallel id, task id, task type, and thread_num so the FileCheck
 * directives below can match them.  'id' is a test-supplied label
 * identifying which region's task is being printed.  The critical
 * section keeps output lines from interleaving between threads. */
void print_task_info_at(int ancestor_level, int id)
{
#pragma omp critical
  {
    int task_type;
    char buffer[2048];
    ompt_data_t *parallel_data;
    ompt_data_t *task_data;
    int thread_num;
    /* Fixed mojibake: "&parallel_data" had been corrupted to
     * "&para;llel_data" (an HTML entity swallowed the ampersand). */
    ompt_get_task_info(ancestor_level, &task_type, &task_data, NULL,
                       &parallel_data, &thread_num);
    format_task_type(task_type, buffer);
    printf("%" PRIu64 ": ancestor_level=%d id=%d task_type=%s=%d "
           "parallel_id=%" PRIu64 " task_id=%" PRIu64
           " thread_num=%d\n",
           ompt_get_thread_data()->value, ancestor_level, id, buffer,
           task_type, parallel_data->value, task_data->value, thread_num);
  }
} /* removed the stray ';' that followed the function body */
__attribute__ ((noinline)) // workaround for bug in icc
/* Convenience wrapper: print info about the innermost (current) task,
 * i.e. ancestor level 0. */
void print_innermost_task_info(int id)
{
  print_task_info_at(0, id);
}
int main()
{
  // Structure of the test: an outer team of 2 threads ("region 0"),
  // then two nested serialized regions (num_threads(1)) entered only by
  // the worker thread.  thread_num must be 1 in region 0 and 0 in each
  // serialized region, both when asked directly and when walking the
  // task ancestry.  All expected output is pinned by the CHECK lines
  // below, which must stay byte-identical for FileCheck.
#pragma omp parallel num_threads(2)
  {
    // sync threads before checking the output
#pragma omp barrier
    // region 0
    if (omp_get_thread_num() == 1) {
      // executed by worker thread only
      // assert that thread_num is 1
      print_innermost_task_info(1);
#pragma omp parallel num_threads(1)
      {
        // serialized region 1
        // assert that thread_num is 0
        print_innermost_task_info(2);
#pragma omp parallel num_threads(1)
        {
          // serialized region 2
          // assert that thread_num is 0
          print_innermost_task_info(3);
          // Check the value of thread_num while iterating over the hierarchy
          // of active tasks.
          print_task_info_at(0, 3);
          print_task_info_at(1, 2);
          print_task_info_at(2, 1);
        }
      }
    }
  }
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id=[[PARALLEL_ID_0:[0-9]+]], task_id=[[TASK_ID_0:[0-9]+]], actual_parallelism=1, index=1, flags=1
  // region 0
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[TASK_ID_0]],
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_1:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_1]], task_id=[[TASK_ID_1:[0-9]+]]
  // CHECK-DAG: {{^}}[[WORKER_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_1]], task_id=[[TASK_ID_2:[0-9]+]]
  // assert some info about implicit task executed by worker thread
  // thread_num is the most important
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=0 id=1
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_1]] task_id=[[TASK_ID_2]]
  // CHECK-SAME: thread_num=1
  // serialized region 1
  // CHECK: {{^}}[[WORKER_ID]]: ompt_event_parallel_begin: parent_task_id=[[TASK_ID_2]],
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_2:[0-9]+]]
  // CHECK-DAG: {{^}}[[WORKER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_2]], task_id=[[TASK_ID_3:[0-9]+]]
  // assert some information about the implicit task of the serialized region 1
  // pay attention that thread_num should take value 0
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=0 id=2
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_2]] task_id=[[TASK_ID_3]]
  // CHECK-SAME: thread_num=0
  // serialized region 2
  // CHECK: {{^}}[[WORKER_ID]]: ompt_event_parallel_begin: parent_task_id=[[TASK_ID_3]],
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_3:[0-9]+]]
  // CHECK-DAG: {{^}}[[WORKER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_3]], task_id=[[TASK_ID_4:[0-9]+]]
  // assert some information about the implicit task of the serialized region 2
  // pay attention that thread_num should take value 0
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=0 id=3
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_3]] task_id=[[TASK_ID_4]]
  // CHECK-SAME: thread_num=0
  // Check the value of thread_num argument while iterating over the hierarchy
  // of active tasks. The expected is that thread_num takes the value checked
  // above in the test case (0, 0, 1 - respectively).
  // Thread is the master thread of the region 2, so thread_num should be 0.
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=0 id=3
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_3]] task_id=[[TASK_ID_4]]
  // CHECK-SAME: thread_num=0
  // Thread is the master thread of the region 1, so thread_num should be 0.
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=1 id=2
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_2]] task_id=[[TASK_ID_3]]
  // CHECK-SAME: thread_num=0
  // Thread is the worker thread of the region 0, so thread_num should be 1.
  // CHECK: {{^}}[[WORKER_ID]]: ancestor_level=2 id=1
  // CHECK-SAME: parallel_id=[[PARALLEL_ID_1]] task_id=[[TASK_ID_2]]
  // CHECK-SAME: thread_num=1
  return 0;
}
|
libsvm_parser.h | /*!
* Copyright (c) 2015 by Contributors
* \file libsvm_parser.h
* \brief iterator parser to parse libsvm format
* \author Tianqi Chen
*/
#ifndef XGBOOST_IO_LIBSVM_PARSER_H_
#define XGBOOST_IO_LIBSVM_PARSER_H_
#define NOMINMAX
#include <vector>
#include <cstring>
#include <cctype>
#include <algorithm>
#include "../utils/omp.h"
#include "../utils/utils.h"
#include "../sync/sync.h"
#include "../utils/thread_buffer.h"
#include "./sparse_batch_page.h"
namespace xgboost {
namespace io {
/*! \brief page returned by libsvm parser */
struct LibSVMPage : public SparsePage {
  std::vector<float> label;  // one label per parsed row (see ParseBlock)
  // overload clear: reset both the sparse entries and the labels
  inline void Clear() {
    SparsePage::Clear();
    label.clear();
  }
};
/*!
* \brief libsvm parser that parses the input lines
* and returns rows in input data
* factory that was used by threadbuffer template
*/
/*!
 * \brief Factory that reads chunks from an InputSplit and parses them
 *        into per-thread LibSVMPage batches; plugged into the
 *        utils::ThreadBuffer template as the producer side.
 */
class LibSVMPageFactory {
 public:
  LibSVMPageFactory()
      : bytes_read_(0), at_head_(true) {
  }
  inline bool Init(void) {
    return true;
  }
  /*!
   * \brief bind the input source and pick the number of parser threads:
   *        at most half the available processors, at least one, and
   *        never more than the requested nthread
   */
  inline void Setup(dmlc::InputSplit *source,
                    int nthread) {
    source_ = source;
    int maxthread;
    // NOTE(review): every thread stores the same value here; formally a
    // data race kept from the original code (benign in practice).
    #pragma omp parallel
    {
      maxthread = omp_get_num_procs();
    }
    maxthread = std::max(maxthread / 2, 1);
    nthread_ = std::min(maxthread, nthread);
  }
  inline void SetParam(const char *name, const char *val) {}
  /*! \brief fill data with the next parsed chunk; false at end of input */
  inline bool LoadNext(std::vector<LibSVMPage> *data) {
    return FillData(data);
  }
  inline void FreeSpace(std::vector<LibSVMPage> *a) {
    delete a;
  }
  inline std::vector<LibSVMPage> *Create(void) {
    return new std::vector<LibSVMPage>();
  }
  inline void BeforeFirst(void) {
    utils::Assert(at_head_, "cannot call beforefirst");
  }
  inline void Destroy(void) {
    delete source_;
  }
  inline size_t bytes_read(void) const {
    return bytes_read_;
  }

 protected:
  /*!
   * \brief read one chunk and parse it with nthread_ threads; each thread
   *        gets a byte range snapped back to a line boundary so that no
   *        record is split between threads
   */
  inline bool FillData(std::vector<LibSVMPage> *data) {
    dmlc::InputSplit::Blob chunk;
    if (!source_->NextChunk(&chunk)) return false;
    int nthread;
    #pragma omp parallel num_threads(nthread_)
    {
      nthread = omp_get_num_threads();
    }
    // reserve space for data: one page per parser thread
    data->resize(nthread);
    bytes_read_ += chunk.size;
    utils::Assert(chunk.size != 0, "LibSVMParser.FileData");
    char *head = reinterpret_cast<char*>(chunk.dptr);
    #pragma omp parallel num_threads(nthread_)
    {
      // threadid
      int tid = omp_get_thread_num();
      size_t nstep = (chunk.size + nthread - 1) / nthread;
      size_t sbegin = std::min(tid * nstep, chunk.size);
      size_t send = std::min((tid + 1) * nstep, chunk.size);
      char *pbegin = BackFindEndLine(head + sbegin, head);
      char *pend;
      if (tid + 1 == nthread) {
        pend = head + send;
      } else {
        pend = BackFindEndLine(head + send, head);
      }
      ParseBlock(pbegin, pend, &(*data)[tid]);
    }
    return true;
  }
  /*!
   * \brief parse data into out
   * \param begin beginning of buffer
   * \param end end of buffer
   *
   * Fixes relative to the original:
   *  - the bound is now tested BEFORE dereferencing; the original
   *    evaluated isspace(*p) first and read one byte past the chunk end
   *    (and likewise tested *p == ':' without a bound check);
   *  - chars are cast to unsigned char before the <cctype> calls, since
   *    passing a negative plain char is undefined behavior.
   */
  inline void ParseBlock(char *begin,
                         char *end,
                         LibSVMPage *out) {
    using namespace std;
    out->Clear();
    char *p = begin;
    while (p != end) {
      while (p != end && isspace(static_cast<unsigned char>(*p))) ++p;
      if (p == end) break;
      char *head = p;
      while (p != end && isdigit(static_cast<unsigned char>(*p))) ++p;
      if (p != end && *p == ':') {
        // "index:value" feature pair
        out->data.push_back(SparseBatch::Entry(atol(head),
                                               static_cast<bst_float>(atof(p + 1))));
      } else {
        // a bare number starts a new row: it is the label
        if (out->label.size() != 0) {
          out->offset.push_back(out->data.size());
        }
        out->label.push_back(static_cast<float>(atof(head)));
      }
      while (p != end && !isspace(static_cast<unsigned char>(*p))) ++p;
    }
    if (out->label.size() != 0) {
      out->offset.push_back(out->data.size());
    }
    utils::Check(out->label.size() + 1 == out->offset.size(),
                 "LibSVMParser inconsistent");
  }
  /*!
   * \brief start from bptr, go backward and find first endof line
   * \param bptr end position to go backward
   * \param begin the beginning position of buffer
   * \return position of first endof line going backward
   */
  inline char* BackFindEndLine(char *bptr,
                               char *begin) {
    for (; bptr != begin; --bptr) {
      if (*bptr == '\n' || *bptr == '\r') return bptr;
    }
    return begin;
  }

 private:
  // nthread
  int nthread_;
  // number of bytes readed
  size_t bytes_read_;
  // at beginning, at end of stream
  bool at_head_;
  // source split that provides the data
  dmlc::InputSplit *source_;
};
/*! \brief iterator over LibSVMPage batches produced by LibSVMPageFactory */
class LibSVMParser : public utils::IIterator<LibSVMPage> {
 public:
  explicit LibSVMParser(dmlc::InputSplit *source,
                        int nthread)
      : at_end_(false), data_ptr_(0), data_(NULL) {
    itr.SetParam("buffer_size", "2");
    itr.get_factory().Setup(source, nthread);
    itr.Init();
  }
  virtual void BeforeFirst(void) {
    itr.BeforeFirst();
  }
  /*!
   * \brief advance to the next non-empty page
   * \return false once the underlying thread buffer is exhausted
   */
  virtual bool Next(void) {
    if (at_end_) return false;
    while (true) {
      // refill from the thread buffer once the current batch is consumed
      if (data_ == NULL || data_ptr_ >= data_->size()) {
        if (!itr.Next(data_)) {
          at_end_ = true; return false;
        } else {
          data_ptr_ = 0;
        }
      }
      // skip empty pages (threads whose byte range held no full record)
      while (data_ptr_ < data_->size()) {
        data_ptr_ += 1;
        if ((*data_)[data_ptr_ - 1].Size() != 0) {
          return true;
        }
      }
    }
    return true;  // NOTE(review): unreachable -- the loop above never exits
  }
  /*! \brief current page; valid only after a successful Next() */
  virtual const LibSVMPage &Value(void) const {
    return (*data_)[data_ptr_ - 1];
  }
  inline size_t bytes_read(void) const {
    return itr.get_factory().bytes_read();
  }

 private:
  bool at_end_;
  size_t data_ptr_;
  std::vector<LibSVMPage> *data_;
  utils::ThreadBuffer<std::vector<LibSVMPage>*, LibSVMPageFactory> itr;
};
} // namespace io
} // namespace xgboost
#endif // XGBOOST_IO_LIBSVM_PARSER_H_
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
/* Initialize the work arrays: C zeroed, D[i] = i, E[i] = -i. */
#define INIT() INIT_LOOP(N, {C[i] = 0; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Serialized parallel region (if(0) forces 1 thread): copies D into A,
 * computes two suffix sums separated by barriers, and stores their
 * difference in B.  Exercises barriers in a serialized region. */
#define PARALLEL_A() { \
  _Pragma("omp parallel num_threads(33) if (0)") \
  { \
    int tid = omp_get_thread_num(); \
    int cs = N / omp_get_num_threads(); \
    int lb = tid * cs; \
    int ub = (tid+1)*cs; \
    ub = ub > N ? N : ub; \
    for (int i = lb; i < ub; i++) { \
      A[i] = D[i]; \
    } \
    _Pragma("omp barrier") \
    double sum = 0; \
    for (int i = 1+tid; i < N; i++) { \
      sum += A[i]; \
    } \
    _Pragma("omp barrier") \
    A[tid] = sum; \
    sum = 0; \
    for (int i = 2+tid; i < N; i++) { \
      sum += A[i]; \
    } \
    _Pragma("omp barrier") \
    A[tid+1] = sum; \
    _Pragma("omp barrier") \
    B[tid] = A[tid]-A[tid+1]; \
  } \
}
/* Per-thread body shared by several tests: copy D into A, suffix-sum
 * into A[tid] with barriers between phases, then accumulate the
 * per-thread check value into B via C. */
#define BODY_B() { \
  int tid = omp_get_thread_num(); \
  int cs = N / omp_get_num_threads(); \
  int lb = tid * cs; \
  int ub = (tid+1)*cs; \
  ub = ub > N ? N : ub; \
  for (int i = lb; i < ub; i++) { \
    A[i] = D[i]; \
  } \
  _Pragma("omp barrier") \
  double sum = 0; \
  for (int i = 1+tid; i < N; i++) { \
    sum += A[i]; \
  } \
  _Pragma("omp barrier") \
  A[tid] = sum; \
  _Pragma("omp barrier") \
  C[tid] = A[tid]-A[tid+1]-tid; \
  if (tid < omp_get_num_threads()-1) B[tid] += C[tid]; \
}
/* Run BODY_B in a team whose size comes from threads[0]. */
#define PARALLEL_B() { \
  _Pragma("omp parallel num_threads(threads[0])") \
  { \
    BODY_B(); \
  } \
}
/* Five consecutive parallel regions to stress repeated team creation. */
#define PARALLEL_B5() { PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() }
/* Nested parallelism variant: 16 outer threads each spawn a 16-thread
 * inner team running the BODY_B-style computation. */
#define BODY_NP() { \
  _Pragma("omp parallel num_threads(16)") { \
    int b = omp_get_thread_num()*16; \
    _Pragma("omp parallel num_threads(16)") { \
      int tid = omp_get_thread_num(); \
      int cs = N / omp_get_num_threads(); \
      int lb = tid * cs; \
      int ub = (tid+1)*cs; \
      ub = ub > N ? N : ub; \
      for (int i = lb; i < ub; i++) { \
        A[i] = D[i]; \
      } \
      _Pragma("omp barrier") \
      double sum = 0; \
      for (int i = 1+tid; i < N; i++) { \
        sum += A[i]; \
      } \
      _Pragma("omp barrier") \
      A[tid] = sum; \
      _Pragma("omp barrier") \
      C[tid] = A[tid]-A[tid+1]-tid; \
      if (tid < omp_get_num_threads()-1) B[b+tid] += C[tid]; else B[b+tid]=0; \
    } \
  } \
}
int main(void) {
  check_offloading();
  double A[N+2], B[N+2], C[N+2], D[N+2], E[N+2];
  INIT();
  /* Probe whether the target region actually runs on the device or
   * falls back to the host; thread counts are chosen accordingly. */
  long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int gpu_threads = 768;
  int cpu_threads = 32;
  int max_threads = cpuExec ? cpu_threads : gpu_threads;
  //
  // Test: Barrier in a serialized parallel region.
  //
  TESTD("omp target teams num_teams(1) thread_limit(32)", {
    PARALLEL_A()
  }, VERIFY(0, 1, B[i], i+1));
  //
  // Test: Barrier in a parallel region.
  //
  for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) {
    int threads[1]; threads[0] = t;
    TESTD("omp target teams num_teams(1) thread_limit(max_threads)", {
      ZERO(B);
      PARALLEL_B5()
    }, VERIFY(0, threads[0]-1, B[i], 5));
  }
  /* keep the expected success count constant across host/device runs */
  DUMP_SUCCESS(gpu_threads-max_threads);
  //
  // Test: Barrier in consecutive parallel regions with variable # of threads.
  //
  TESTD("omp target teams num_teams(1) thread_limit(max_threads)", {
    ZERO(B);
    for (int t = 32; t <= max_threads; t += 32) {
      int threads[1]; threads[0] = t;
      PARALLEL_B()
    }
  }, VERIFY(0, max_threads-1, B[i], (max_threads / 32) - (i+1) / 32));
  //
  // Test: Single thread in target region.
  //
  TESTD("#pragma omp target", {
    ZERO(B);
    BODY_B()
  }, VERIFY(0, 1, C[i], 491535));
  //
  // Test: Barrier in target parallel.
  //
  for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) {
    ZERO(B);
    int threads; threads = t;
    TESTD("omp target parallel num_threads(threads)", {
      BODY_B();
    }, VERIFY(0, t-1, B[i], (trial+1)*1.0));
  }
  DUMP_SUCCESS(gpu_threads-max_threads);
#if 0
  //
  // Test: Barrier in nested parallel in target region.
  //
  if (!cpuExec) {
    ZERO(B);
    TEST({
      BODY_NP();
    }, VERIFY(0, 16*16, B[i], (i > 0 && (i+1) % 16 == 0 ? 0 : (trial+1)*1)) );
  } else {
    DUMP_SUCCESS(1);
  }
#endif
  DUMP_SUCCESS(1);
  // target parallel + parallel
  // target + simd
  // target/teams/parallel with varying numbers of threads
  /* NOTE(review): main has no explicit return; C99/C11 define falling
   * off the end of main as returning 0, so this is well defined. */
}
|
omp_loop.h | // -*- C++ -*-
// Copyright (C) 2007-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
#include <parallel/base.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, etc.).
* @param __f Functor to @a process an element with __op (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already
* processed elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
         typename _Op,
         typename _Fu,
         typename _Red,
         typename _Result>
  _Op
  __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
                                             _Op __o, _Fu& __f, _Red __r,
                                             _Result __base,
                                             _Result& __output,
      typename std::iterator_traits<_RAIter>::difference_type __bound)
  {
    typedef typename std::iterator_traits<_RAIter>::difference_type
      _DifferenceType;
    _DifferenceType __length = __end - __begin;
    // Never use more threads than elements.
    _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
      (__get_max_threads(), __length);
    // Per-thread partial results; allocated inside the parallel region
    // once the actual team size is known.
    _Result *__thread_results;
#   pragma omp parallel num_threads(__num_threads)
    {
#     pragma omp single
      {
        // The single construct's implicit barrier publishes
        // __num_threads and __thread_results to the whole team.
        __num_threads = omp_get_num_threads();
        __thread_results = new _Result[__num_threads];
        for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
          __thread_results[__i] = _Result();
      }
      _ThreadIndex __iam = omp_get_thread_num();
      // Dynamic scheduling approximates work stealing for irregular work.
#pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
      for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
        __thread_results[__iam] = __r(__thread_results[__iam],
                                      __f(__o, __begin+__pos));
    } //parallel
    // Sequential reduction of the per-thread partial results.
    // NOTE(review): __base and __bound are unused in this variant;
    // presumably other overloads of this template use them -- confirm
    // before relying on them here.
    for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
      __output = __r(__output, __thread_results[__i]);
    delete [] __thread_results;
    // Points to last element processed (needed as return value for
    // some algorithms like transform).
    __f._M_finish_iterator = __begin + __length;
    return __o;
  }
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
|
GB_binop__isge_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_int64
// A.*B function (eWiseMult): GB_AemultB__isge_int64
// A*D function (colscale): GB_AxD__isge_int64
// D*A function (rowscale): GB_DxB__isge_int64
// C+=B function (dense accum): GB_Cdense_accumB__isge_int64
// C+=b function (dense accum): GB_Cdense_accumb__isge_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_int64
// C=scalar+B GB_bind1st__isge_int64
// C=scalar+B' GB_bind1st_tran__isge_int64
// C=A+scalar GB_bind2nd__isge_int64
// C=A'+scalar GB_bind2nd_tran__isge_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij >= bij)
// Macro plumbing consumed by the GB_*_template.c files included below:
// the A/B/C types, element accessors, and the ISGE operator itself.
#define GB_ATYPE \
    int64_t
#define GB_BTYPE \
    int64_t
#define GB_CTYPE \
    int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: z = (x >= y), the ISGE operator on int64_t
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
    0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is the generated placeholder meaning no CBLAS gateway exists.
#define GB_CBLAS_AXPY \
    (none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_INT64 || GxB_NO_ISGE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop itself lives in
// the included template, specialized by the GB_* macros defined above.
// Returns GrB_NO_VALUE when this operator/type combination is disabled.
GrB_Info GB_Cdense_ewise3_noaccum__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// The kfirst/klast/pstart slice arrays partition B's entries across
// ntasks tasks for the included template.
GrB_Info GB_Cdense_accumB__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (of type int64_t, passed via
// p_bwork) into the dense matrix C using the included template.
// Returns GrB_NO_VALUE when this operator/type combination is disabled.
// Cleanup: the generated code contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block; it has been removed.
GrB_Info GB_Cdense_accumb__isge_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// The slice arrays partition A's entries across ntasks tasks.
GrB_Info GB_AxD__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's value array for the included template
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's value array for the included template
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Release the ek_slice workspaces for M, A, and B that the include
// below may have allocated.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, computed by the included template.
GrB_Info GB_AaddB__isge_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers, filled in by the template and freed here
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, computed by the included template.
GrB_Info GB_AemultB__isge_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers, filled in by the template and freed by
    // GB_FREE_ALL (defined just above GB_AaddB in this file)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry p present in B; the GBB macro
// presumably treats a NULL bitmap Bb as "all entries present" -- it is
// defined elsewhere in GraphBLAS.
GrB_Info GB_bind1st__isge_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij >= y) with the scalar y bound as the
// second operand.  Entries absent from A's bitmap (Ab) are skipped.
GrB_Info GB_bind2nd__isge_int64
(
GB_void *Cx_output,             // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the generic input/output arrays
    int64_t *out   = (int64_t *) Cx_output ;
    int64_t *avals = (int64_t *) Ax_input ;
    int64_t yval   = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in A's bitmap
        if (!GBB (Ab, k)) { continue ; }
        int64_t aij = avals [k] ;
        out [k] = (aij >= yval) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: despite the name there is no typecast (see banner above);
// computes cij = (x >= aij).  Consumed by the included GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A while applying the bound-first-scalar operator.
GrB_Info GB_bind1st_tran__isge_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any generated kernels that follow
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: despite the name there is no typecast (see banner above);
// computes cij = (aij >= y).  Consumed by the included GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A while applying the bound-second-scalar operator.
GrB_Info GB_bind2nd_tran__isge_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
irbuilder_unroll_partial_factor_for_collapse.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_factor_for_collapse(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[M_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[DOTOMP_IV:.+]] = alloca i64, align 8
// CHECK-NEXT: %[[TMP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[TMP1:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTCAPTURE_EXPR_:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[J:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTCAPTURE_EXPR_2:.+]] = alloca i64, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTUNROLLED_IV_J:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTOMP_LB:.+]] = alloca i64, align 8
// CHECK-NEXT: %[[DOTOMP_UB:.+]] = alloca i64, align 8
// CHECK-NEXT: %[[DOTOMP_STRIDE:.+]] = alloca i64, align 8
// CHECK-NEXT: %[[DOTOMP_IS_LAST:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[I6:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTUNROLLED_IV_J7:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTUNROLL_INNER_IV_J:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32 %[[M:.+]], i32* %[[M_ADDR]], align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load i32, i32* %[[M_ADDR]], align 4
// CHECK-NEXT: store i32 %[[TMP0]], i32* %[[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: store i32 0, i32* %[[J]], align 4
// CHECK-NEXT: %[[TMP1_1:.+]] = load i32, i32* %[[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP1_1]], 0
// CHECK-NEXT: %[[DIV:.+]] = sdiv i32 %[[SUB]], 1
// CHECK-NEXT: %[[CONV:.+]] = sext i32 %[[DIV]] to i64
// CHECK-NEXT: %[[MUL:.+]] = mul nsw i64 %[[CONV]], 2
// CHECK-NEXT: %[[SUB3:.+]] = sub nsw i64 %[[MUL]], 1
// CHECK-NEXT: store i64 %[[SUB3]], i64* %[[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: store i32 0, i32* %[[DOTUNROLLED_IV_J]], align 4
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[DOTCAPTURE_EXPR_]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 0, %[[TMP2]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[OMP_PRECOND_THEN:.+]], label %[[OMP_PRECOND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_PRECOND_THEN]]:
// CHECK-NEXT: store i64 0, i64* %[[DOTOMP_LB]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i64, i64* %[[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: store i64 %[[TMP3]], i64* %[[DOTOMP_UB]], align 8
// CHECK-NEXT: store i64 1, i64* %[[DOTOMP_STRIDE]], align 8
// CHECK-NEXT: store i32 0, i32* %[[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @3)
// CHECK-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[DOTOMP_IS_LAST]], i64* %[[DOTOMP_LB]], i64* %[[DOTOMP_UB]], i64* %[[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK-NEXT: %[[TMP4:.+]] = load i64, i64* %[[DOTOMP_UB]], align 8
// CHECK-NEXT: %[[TMP5:.+]] = load i64, i64* %[[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: %[[CMP8:.+]] = icmp sgt i64 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP8]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i64, i64* %[[DOTCAPTURE_EXPR_2]], align 8
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: %[[TMP7:.+]] = load i64, i64* %[[DOTOMP_UB]], align 8
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i64 [ %[[TMP6]], %[[COND_TRUE]] ], [ %[[TMP7]], %[[COND_FALSE]] ]
// CHECK-NEXT: store i64 %[[COND]], i64* %[[DOTOMP_UB]], align 8
// CHECK-NEXT: %[[TMP8:.+]] = load i64, i64* %[[DOTOMP_LB]], align 8
// CHECK-NEXT: store i64 %[[TMP8]], i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: br label %[[OMP_INNER_FOR_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_INNER_FOR_COND]]:
// CHECK-NEXT: %[[TMP9:.+]] = load i64, i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i64, i64* %[[DOTOMP_UB]], align 8
// CHECK-NEXT: %[[CMP10:.+]] = icmp sle i64 %[[TMP9]], %[[TMP10]]
// CHECK-NEXT: br i1 %[[CMP10]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_INNER_FOR_BODY]]:
// CHECK-NEXT: %[[TMP11:.+]] = load i64, i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: %[[DIV12:.+]] = sdiv i64 %[[TMP11]], 2
// CHECK-NEXT: %[[MUL13:.+]] = mul nsw i64 %[[DIV12]], 1
// CHECK-NEXT: %[[ADD:.+]] = add nsw i64 0, %[[MUL13]]
// CHECK-NEXT: %[[CONV14:.+]] = trunc i64 %[[ADD]] to i32
// CHECK-NEXT: store i32 %[[CONV14]], i32* %[[I6]], align 4
// CHECK-NEXT: %[[TMP12:.+]] = load i64, i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i64, i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: %[[DIV15:.+]] = sdiv i64 %[[TMP13]], 2
// CHECK-NEXT: %[[MUL16:.+]] = mul nsw i64 %[[DIV15]], 2
// CHECK-NEXT: %[[SUB17:.+]] = sub nsw i64 %[[TMP12]], %[[MUL16]]
// CHECK-NEXT: %[[MUL18:.+]] = mul nsw i64 %[[SUB17]], 4
// CHECK-NEXT: %[[ADD19:.+]] = add nsw i64 0, %[[MUL18]]
// CHECK-NEXT: %[[CONV20:.+]] = trunc i64 %[[ADD19]] to i32
// CHECK-NEXT: store i32 %[[CONV20]], i32* %[[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: %[[TMP14:.+]] = load i32, i32* %[[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: store i32 %[[TMP14]], i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label %[[FOR_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[FOR_COND]]:
// CHECK-NEXT: %[[TMP15:.+]] = load i32, i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[DOTUNROLLED_IV_J7]], align 4
// CHECK-NEXT: %[[ADD21:.+]] = add nsw i32 %[[TMP16]], 4
// CHECK-NEXT: %[[CMP22:.+]] = icmp sle i32 %[[TMP15]], %[[ADD21]]
// CHECK-NEXT: br i1 %[[CMP22]], label %[[LAND_RHS:.+]], label %[[LAND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[LAND_RHS]]:
// CHECK-NEXT: %[[TMP17:.+]] = load i32, i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: %[[CMP24:.+]] = icmp sle i32 %[[TMP17]], 8
// CHECK-NEXT: br label %[[LAND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[LAND_END]]:
// CHECK-NEXT: %[[TMP18:.+]] = phi i1 [ false, %[[FOR_COND]] ], [ %[[CMP24]], %[[LAND_RHS]] ]
// CHECK-NEXT: br i1 %[[TMP18]], label %[[FOR_BODY:.+]], label %[[FOR_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[FOR_BODY]]:
// CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: %[[MUL26:.+]] = mul nsw i32 %[[TMP19]], 1
// CHECK-NEXT: %[[ADD27:.+]] = add nsw i32 0, %[[MUL26]]
// CHECK-NEXT: store i32 %[[ADD27]], i32* %[[J]], align 4
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I6]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I6]], align 4
// CHECK-NEXT: %[[IDXPROM28:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX29:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM28]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX29]], align 4
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[J]], align 4
// CHECK-NEXT: %[[IDXPROM30:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX31:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM30]]
// CHECK-NEXT: %[[TMP28:.+]] = load float, float* %[[ARRAYIDX31]], align 4
// CHECK-NEXT: %[[MUL32:.+]] = fmul float %[[TMP25]], %[[TMP28]]
// CHECK-NEXT: %[[ADD33:.+]] = fadd float %[[TMP22]], %[[MUL32]]
// CHECK-NEXT: %[[TMP29:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP30:.+]] = load i32, i32* %[[I6]], align 4
// CHECK-NEXT: %[[IDXPROM34:.+]] = sext i32 %[[TMP30]] to i64
// CHECK-NEXT: %[[ARRAYIDX35:.+]] = getelementptr inbounds float, float* %[[TMP29]], i64 %[[IDXPROM34]]
// CHECK-NEXT: %[[TMP31:.+]] = load float, float* %[[ARRAYIDX35]], align 4
// CHECK-NEXT: %[[ADD36:.+]] = fadd float %[[TMP31]], %[[ADD33]]
// CHECK-NEXT: store float %[[ADD36]], float* %[[ARRAYIDX35]], align 4
// CHECK-NEXT: br label %[[FOR_INC:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[FOR_INC]]:
// CHECK-NEXT: %[[TMP32:.+]] = load i32, i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: %[[INC:.+]] = add nsw i32 %[[TMP32]], 1
// CHECK-NEXT: store i32 %[[INC]], i32* %[[DOTUNROLL_INNER_IV_J]], align 4
// CHECK-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[FOR_END]]:
// CHECK-NEXT: br label %[[OMP_BODY_CONTINUE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_BODY_CONTINUE]]:
// CHECK-NEXT: br label %[[OMP_INNER_FOR_INC:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_INNER_FOR_INC]]:
// CHECK-NEXT: %[[TMP33:.+]] = load i64, i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: %[[ADD37:.+]] = add nsw i64 %[[TMP33]], 1
// CHECK-NEXT: store i64 %[[ADD37]], i64* %[[DOTOMP_IV]], align 8
// CHECK-NEXT: br label %[[OMP_INNER_FOR_COND]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_INNER_FOR_END]]:
// CHECK-NEXT: br label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM38:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @5)
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM38]])
// CHECK-NEXT: br label %[[OMP_PRECOND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_PRECOND_END]]:
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM39:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @7)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @6, i32 %[[OMP_GLOBAL_THREAD_NUM39]])
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Kernel under test: a worksharing loop with collapse(2) whose inner loop is
// partially unrolled by factor 4 via "#pragma omp unroll partial(4)".
// NOTE: the CHECK lines above were autogenerated by update_cc_test_checks.py;
// regenerate them if this function changes.
void unroll_partial_factor_for_collapse(int m, float *a, float *b, float *c, float *d) {
#pragma omp for collapse(2)
for (int i = 0; i < m; i++) {
#pragma omp unroll partial(4)
for (int j = 0; j < 8; j++) {
a[i] += b[i] + c[i] * d[j];
}
}
}
#endif // HEADER
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.mustprogress"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
|
init.c | // Test the placement of XOMP_init() in C/C++ input
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
// Test the placement of XOMP_init() in C/C++ input: spawn a parallel region
// and print the team size once, from the master thread only.
int main(int argc, char* argv[])
{
    // require at least one command-line argument
    if (argc < 2)
    {
        exit(1);
    }
#pragma omp parallel
#pragma omp master
    {
        printf("Number of threads = %d\n", omp_get_num_threads());
    }
    return 0;
}
|
NLmean_propag2dirs_sspacing3_tspacing8_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing3.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing3_tspacing8_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 3
#define SCALE_FACTOR_TIME 8
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
* get_onesnap: take part of a big array(arr1) and put to small one (arr2): arr2 = arr1[id_start:id_end]
*/
/*
 * get_onesnap: copy the inclusive slice arr1[id_start..id_end] into
 * arr2[0..id_end-id_start].
 */
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    int n = id_end - id_start + 1;
    for (int k = 0; k < n; k++)
        arr2[k] = arr1[id_start + k];
}
/*
* put_onesnap: assign small array (arr2) into biger one (arr1): arr1[id_start:id_end] = arr2
*/
/*
 * put_onesnap: store arr2[0..id_end-id_start] into the inclusive slice
 * arr1[id_start..id_end].
 */
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    int n = id_end - id_start + 1;
    for (int k = 0; k < n; k++)
        arr1[id_start + k] = arr2[k];
}
/*
* norm_by_weight: normalize x[dim] by weight W[dim]
*/
/*
 * norm_by_weight: divide each x[k] by its accumulated weight W[k], in place.
 * (No guard against W[k] == 0; caller guarantees nonzero weights.)
 */
void norm_by_weight(int dim, double *x, double *W)
{
    for (int i = dim; i-- > 0; )
        x[i] /= W[i];
}
/* add_mat: elementwise sum[k] = x1[k] + x2[k] for k in [0, dim). */
void add_mat(int dim, double *sum, double *x1, double *x2)
{
    for (int i = 0; i < dim; ++i)
    {
        sum[i] = x2[i] + x1[i];
    }
}
/* initialize: fill x[0..dim-1] with the constant val. */
void initialize(int dim, double *x, double val)
{
    double *end = x + dim;
    while (x != end)
        *x++ = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
* creat_netcdf: create the netcdf file [filename] contain [num_vars] variables
* variable names are [varname]
*/
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
int ncid_wr, retval_wr;
int vel_varid_wr;
int Nt, Nx, Ny, Nz;
int dimids[NDIMS];
/* Create the file. */
if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
ERR(retval_wr);
/* Define the dimensions. The record dimension is defined to have
* unlimited length - it can grow as needed.*/
if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
ERR(retval_wr);
/* Define the netCDF variables for the data. */
dimids[0] = Nt;
dimids[1] = Nx;
dimids[2] = Ny;
dimids[3] = Nz;
for (int i = 0; i<num_vars; i++)
{
if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
ERR(retval_wr);
}
/* End define mode (SHOULD NOT FORGET THIS!). */
if ((retval_wr = nc_enddef(ncid_wr)))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
* write_netcdf:
* write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start]
*/
/*
 * write_netcdf: write the hyperslab described by [start]/[count] of variable
 * [varname] in file [filename] from the buffer [snaps].
 * Exits via ERR() on any netCDF failure.
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid, rc, varid;
    /* Open the existing file for writing. */
    if ((rc = nc_open(filename, NC_WRITE, &ncid)))
        ERR(rc);
    /* Look up the variable id. */
    if ((rc = nc_inq_varid(ncid, varname, &varid)))
        ERR(rc);
    /* Write the data. */
    if ((rc = nc_put_vara_double(ncid, varid, start, count, snaps)))
        ERR(rc);
    /* Close the file. */
    if ((rc = nc_close(ncid)))
        ERR(rc);
    printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
* read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps]
* started at [snap_start]
*/
/*
 * read_netcdf: read the hyperslab described by [start]/[count] of variable
 * [varname] from file [filename] into the buffer [snaps].
 * Exits via ERR() on any netCDF failure.
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid, rc, varid;
    /* Open read-only. */
    if ((rc = nc_open(filename, NC_NOWRITE, &ncid)))
        ERR(rc);
    /* Look up the variable id. */
    if ((rc = nc_inq_varid(ncid, varname, &varid)))
        ERR(rc);
    /* Read the data. */
    if ((rc = nc_get_vara_double(ncid, varid, start, count, snaps)))
        ERR(rc);
    /* Close the file, freeing all resources. */
    if ((rc = nc_close(ncid)))
        ERR(rc);
    printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
* estimate_distance: estimate the distances between ref patch and moving patches (prev and after)
* patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1)
* reference patch are centered at [center_ref_idy, center_ref_idz]
* moving patches are centered at [center_moving_idy, center_moving_idz]
* dist_all contain 2 elements: distances to moving patches in the prev and after plane
* x_ref: reference plane
* x_prev: previous plane
* x_after: plane after
* ref_ids_y(z): indices of points in reference patch
* moving_ids_y(z): indices of points in moving patch
*/
/*
 * generate_grids: precompute, for every estimation point (i,j) of the
 * N_HR x N_HR plane, the wrapped y/z pixel indices of every similarity
 * patch of every neighborhood candidate (gridpatches_y/z, flattened as
 * [i][j][neighbor][sim]), and the flat indices (acc_ids) of the central
 * ACC_FULLSIZE^2 accumulation window inside a SIM_FULLSIZE^2 patch.
 * Fix: removed the unused file-local `int neighbor_id, sim_id;` that were
 * immediately shadowed by the loop-scoped declarations below.
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
    /* offsets of the neighborhood search window, row-major */
    int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
    for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
    {
        for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
        {
            gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
            gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
        }
    }
    /* offsets of pixels inside one similarity patch */
    int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
    {
        for (int q = 0; q < SIM_FULLSIZE; q++)
        {
            gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
            gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
        }
    }
    /* flat index of each pixel inside a similarity patch */
    int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
        for (int q = 0; q < SIM_FULLSIZE; q++)
            grid_sim[p][q] = p * SIM_FULLSIZE + q;
    /* central accumulation window inside the similarity patch */
    for (int p = 0; p < ACC_FULLSIZE; p++)
        for (int q = 0; q < ACC_FULLSIZE; q++)
            acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
    int valy, valz;
    long int grid_id;
    for (int i = 0; i < N_HR; i++)
    {
        for (int j = 0; j < N_HR; j++)
        {
            for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
            {
                for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
                {
                    grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                        + j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                        + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
                    valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
                    valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
                    /* NOTE(review): this wrap uses (N_HR - 1) + val / val - (N_HR - 1)
                     * rather than N_HR + val, so valy == -1 maps to N_HR - 2, not
                     * N_HR - 1.  Looks like an off-by-one in the periodic boundary —
                     * behavior kept as-is; confirm intended indexing before changing. */
                    if (valy < 0)
                        gridpatches_y[grid_id] = (N_HR - 1) + valy;
                    else if (valy > (N_HR - 1))
                        gridpatches_y[grid_id] = valy - (N_HR - 1);
                    else
                        gridpatches_y[grid_id] = valy;
                    if (valz < 0)
                        gridpatches_z[grid_id] = (N_HR - 1) + valz;
                    else if (valz > (N_HR - 1))
                        gridpatches_z[grid_id] = valz - (N_HR - 1);
                    else
                        gridpatches_z[grid_id] = valz;
                }
            }
        }
    }
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
* estimate_distance: estimate the distances between ref patch and moving patches (prev and after)
* patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1)
* reference patch are centered at [center_ref_idy, center_ref_idz]
* moving patches are centered at [center_moving_idy, center_moving_idz]
* dist_all contain 2 elements: distances to moving patches in the prev and after plane
* x_ref: reference plane
* x_prev: previous plane
* x_after: plane after
* ref_ids_y(z): indices of points in reference patch
* moving_ids_y(z): indices of points in moving patch
*/
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
/*
 * NLmean: one non-local-means accumulation pass over the whole plane.
 * For each estimation point (est_idy, est_idz) and each candidate patch ni
 * in its search neighborhood, compute a similarity weight w from the mean
 * squared distance between the reference patch (the central candidate, ri)
 * in x_ref and the candidate patch in x_moving, then accumulate
 * w * x_fusion into x_NLM and w into weight_NLM over the central
 * accumulation window (accids).  The caller normalizes afterwards with
 * norm_by_weight.  gridy/gridz are the precomputed wrapped index tables
 * from generate_grids.
 * NOTE(review): different est_idy iterations can write the same
 * x_NLM/weight_NLM cells (ref_idy spans a patch around est_idy), so the
 * "omp parallel for" below appears to race on those accumulations — confirm.
 */
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
/* ri: flat index of the zero-offset (central) neighborhood candidate */
int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
int est_idy;
#pragma omp parallel for private (est_idy)
for (est_idy = 0; est_idy < N_HR; est_idy++)
for (int est_idz = 0; est_idz < N_HR; est_idz++)
for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
{
int ref_idy, ref_idz, moving_idy, moving_idz;
double du;
double d = 0.0;
long int grid_rid, grid_nid;
/* mean squared distance between reference patch and candidate patch */
for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
{
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
//compute distance btw reference patch and fusion patch
du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
d = d + norm_fact*du*du;
}
/* Gaussian similarity kernel with bandwidth TAU */
double w = exp(-d/(2.0*TAU*TAU));
/* accumulate the weighted candidate over the accumulation window */
for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
{
int ai = accids[k];
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
}
//printf("\n w=%f\n ",w);
}
}
/*
 * propag_forward: propagate reconstructed planes forward in time.
 * Each plane t in (t_first, t_bound1] is estimated from plane t-1:
 * non-local means matches low-frequency patches (Xlf) and fuses the
 * previously reconstructed high-frequency plane of Xrec.
 */
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
    const int plane = N_HR * N_HR;
    for (int t = t_first + 1; t <= t_bound1; t++)
    {
        int ref0 = t_offset + t * plane;            /* slice of plane t */
        int mov0 = t_offset + (t - 1) * plane;      /* slice of plane t-1 */
        double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
        double mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], w[N_HR * N_HR];
        get_onesnap(Xlf, ref_lf, ref0, ref0 + plane - 1);
        get_onesnap(Xlf, mov_lf, mov0, mov0 + plane - 1);
        get_onesnap(Xrec, mov_hf, mov0, mov0 + plane - 1);
        /* zero the accumulators */
        initialize(plane, ref_hf, 0.0);
        initialize(plane, w, 0.0);
        /* propagate from the previous plane */
        NLmean(ref_hf, w, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        /* normalize and store the estimate */
        norm_by_weight(plane, ref_hf, w);
        put_onesnap(Xrec, ref_hf, ref0, ref0 + plane - 1);
    }
}
/*
 * propag_backward: propagate reconstructed planes backward in time.
 * Each plane t in [t_bound2, t_last) is estimated from plane t+1, mirroring
 * propag_forward.
 */
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
    const int plane = N_HR * N_HR;
    for (int t = t_last - 1; t >= t_bound2; --t)
    {
        int ref0 = t_offset + t * plane;            /* slice of plane t */
        int mov0 = t_offset + (t + 1) * plane;      /* slice of plane t+1 */
        double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
        double mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], w[N_HR * N_HR];
        get_onesnap(Xlf, ref_lf, ref0, ref0 + plane - 1);
        get_onesnap(Xlf, mov_lf, mov0, mov0 + plane - 1);
        get_onesnap(Xrec, mov_hf, mov0, mov0 + plane - 1);
        /* zero the accumulators */
        initialize(plane, ref_hf, 0.0);
        initialize(plane, w, 0.0);
        /* propagate from the following plane */
        NLmean(ref_hf, w, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        /* normalize and store the estimate */
        norm_by_weight(plane, ref_hf, w);
        put_onesnap(Xrec, ref_hf, ref0, ref0 + plane - 1);
    }
}
/*
 * propag_2planes: estimate plane t_mid from both of its temporal neighbors
 * (t_mid - 1 and t_mid + 1), accumulating two NLmean passes into the same
 * weight buffer before normalizing.
 */
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
    const int plane = N_HR * N_HR;
    int mid0   = t_offset + t_mid * plane;
    int prev0  = t_offset + (t_mid - 1) * plane;
    int after0 = t_offset + (t_mid + 1) * plane;
    double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
    double mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], w[N_HR * N_HR];
    /* zero the accumulators */
    initialize(plane, ref_hf, 0.0);
    initialize(plane, w, 0.0);
    get_onesnap(Xlf, ref_lf, mid0, mid0 + plane - 1);
    /* accumulate from the previous plane */
    get_onesnap(Xlf, mov_lf, prev0, prev0 + plane - 1);
    get_onesnap(Xrec, mov_hf, prev0, prev0 + plane - 1);
    NLmean(ref_hf, w, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
    /* accumulate from the following plane */
    get_onesnap(Xlf, mov_lf, after0, after0 + plane - 1);
    get_onesnap(Xrec, mov_hf, after0, after0 + plane - 1);
    NLmean(ref_hf, w, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
    /* normalize and store the estimate */
    norm_by_weight(plane, ref_hf, w);
    put_onesnap(Xrec, ref_hf, mid0, mid0 + plane - 1);
}
/*
 * propag_towardcenter: fill the interior planes of one temporal block
 * [t_first, t_first + SCALE_FACTOR_TIME] by propagating from both known
 * boundary planes toward the center, one plane from each side per iteration
 * (t1 from the left, t2 from the right).  For an even SCALE_FACTOR_TIME,
 * the single remaining center plane is estimated last from both neighbors.
 */
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
/* tc: distance from t_first to the center plane (rounded up when odd) */
int tc = (int)SCALE_FACTOR_TIME/2;
if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
for (int td = 1; td < tc; td++)
{
int t1 = t_first + td; // bound on left side
int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
// Initialize with zeros
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
initialize(N_HR * N_HR, xref2_hf, 0.0);
initialize(N_HR * N_HR, w2, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
//Propagate from left bound
get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
//Propagate from right bound
get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
norm_by_weight(N_HR*N_HR, xref2_hf, w2);
put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
}
// Last plane in the center (only exists when SCALE_FACTOR_TIME is even);
// estimated from both of its immediate neighbors.
if (SCALE_FACTOR_TIME % 2 == 0)
{
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
}
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
/* Driver: reads low/high-frequency snapshot stacks from netCDF, reconstructs
 * the interior time planes of every block of every 3D snapshot via
 * propag_towardcenter, and writes the result back to netCDF. */
int main()
{
    /* Create the output file that will receive the reconstruction */
    char *varnames[NUM_VARS] = {"x_rec_all"};
    create_netcdf(FILENAME_WR, NUM_VARS, varnames);
    /* Allocate the full space-time arrays: low-frequency input,
       high-frequency input, and the reconstructed output */
    double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    /* BUG FIX: these allocations were previously used unchecked */
    if (x_fusion_lf_all == NULL || x_fusion_hf_all == NULL || x_rec_all == NULL)
    {
        printf("ERROR: out of memory allocating snapshot arrays\n");
        return 1;
    }
    /* read all snapshots */
    size_t start_ids[4] = {0, 0, 0, 0};
    size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
    read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
    read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
    double time_all_start = omp_get_wtime();
    /* Per-plane scratch buffers and the precomputed patch-search grids */
    double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double));
    double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
    double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double));
    long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
    int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
    int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
    int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
    if (x_current_lf == NULL || x_current_hf == NULL || x_rec == NULL ||
        gridpatches_y == NULL || gridpatches_z == NULL || acc_ids == NULL)
    {
        printf("ERROR: out of memory allocating work buffers\n");
        return 1;
    }
    generate_grids(gridpatches_y, gridpatches_z, acc_ids);
    for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
    {
        int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
        /* Seed the reconstruction with the first measured (PIV) plane */
        get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
        put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
        int block_id;
        for(block_id = 0; block_id < NUM_BLOCKS; block_id++)
        {
            double time_start = omp_get_wtime();
            int t_first = SCALE_FACTOR_TIME*block_id;
            int t_last = SCALE_FACTOR_TIME*(block_id+1);
            /* Put the last PIV plane of the block, then fill its interior */
            get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
            put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
            propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset);
            printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start);
        }
    }
    /* Write the reconstruction to file */
    write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
    /* free memory */
    free(x_rec); free(x_current_lf); free(x_current_hf);
    free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
    free(gridpatches_y); free(gridpatches_z); free(acc_ids);
    printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
    /* BUG FIX: return 0 on success; the old `return 1` signalled failure
       to the shell even when everything completed. */
    return 0;
}
|
pi_omp.c | /*
* Copyright (c) 2009-2010, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Oracle nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
*
*/
#define num_steps 200000000
int
main(int argc, char** argv) {
    /* Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
       Iteration k contributes the pair +1/(4k+1) and -1/(4k+3). */
    double quarter_pi = 0;
    int k;
#ifdef _OPENMP
    omp_set_num_threads(4);   /* fixed team of 4 threads */
    omp_set_dynamic(0);       /* keep the team size exact */
#endif
#pragma omp parallel for reduction(+:quarter_pi)
    for (k = 0; k < num_steps ; k++) {
        quarter_pi += 1.0/(k*4.0 + 1.0);
        quarter_pi -= 1.0/(k*4.0 + 3.0);
    }
    double pi = quarter_pi * 4.0;
    printf("pi done - %f\n", pi);
    return (EXIT_SUCCESS);
}
|
GB_unop__tgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tgamma_fp64_fp64
// op(A') function: GB_unop_tran__tgamma_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tgamma (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies z = tgamma(x) entrywise to an FP64 array, in parallel.
// Handles both the full (non-bitmap) case and the bitmap case, where only
// entries with Ab [p] != 0 are present.
GrB_Info GB_unop_apply__tgamma_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_TGAMMA / GxB_NO_FP64; caller falls
// back to the generic apply
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: reduces to a parallel memcpy
// (never taken here; GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for tgamma)
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = tgamma (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// absent entries (Ab [p] == 0) are skipped and Cx [p] is left untouched
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = tgamma (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = tgamma (A'): transpose and apply, with the shared transpose kernel
// textually included below; the GB_* macros above specialize it for FP64.
GrB_Info GB_unop_tran__tgamma_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller uses the generic transpose
return (GrB_NO_VALUE) ;
#else
// generic transpose template, parameterized by the GB_* macros above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-target-exit-data.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(int x) {
#pragma omp target exit data map(from \
: x)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-exit-data.c:3:1, line:6:1> line:3:6 test 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: `-OMPTargetExitDataDirective {{.*}} <line:4:9, line:5:38> openmp_standalone_directive
// CHECK-NEXT: |-OMPMapClause {{.*}} <line:4:30, line:5:37>
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:36> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:4:9>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:9>
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-exit-data.c:4:9) *const restrict'
|
learner.c | #include <stdlib.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdint.h>
#include "../corpus/corpus_io.h"
#include "../tagger/tags.h"
#include "../lib/hashmap.h"
#include "learner.h"
#include "../rules/rules.h"
#include "../util/dynamic_array.h"
#include "../tagger/tagger.h"
#include "../dictionary/dictionary_reduce.h"
contextual_rule_t learned_rules[NUMRULES];
size_t learned_rule_index = 0;
HASHMAP_FUNCS_CREATE(error, int, error_t);
struct hashmap global_hashmap;
/*void learner_init(){
int size = sizeof(learned_rules)/sizeof(rules_list_t);
for(int i = 0; i < size; i++){
learned_rules
}
learned_rules
//initialize_dynamic_array(&learned_rules, 2, sizeof(contextual_rule_t*));
}*/
/*contextual_rule_t instantiate_rule(int fn, int tag1, int tag2){
return
}*/
/*void add_rule(contextual_rule_t *rule){
add_to_dynamic_array(&learned_rules, rule);
}*/
/* Greedily learns the single contextual rule with the largest net error
 * improvement over the corpus, appends it to learned_rules, and applies it. */
void find_best_rule(corpus_t corpus){
    sorted_error_list_t *errors = error_frequencies(corpus);
    int maximprovement = -1;
    contextual_rule_t current_rule;
    printf("Here is the length: %zu\n", errors->length);
    /* Robustness: never write past the end of the fixed-size rule table. */
    if(learned_rule_index >= NUMRULES){
        return;
    }
    for(int i = 0; i < errors->length; i++){
        error_t error = errors->errors[i];
        pattern_t pattern = find_patterns(corpus, error); // finds the most frequent prev and next tags
        printf("prev3: %d prev2: %d prev1: %d next1: %d next2: %d next3: %d\n", pattern.prevtag3,pattern.prevtag2, pattern.prevtag1, pattern.nexttag1, pattern.nexttag2, pattern.nexttag3);
        current_rule.tag1 = error.machine_tag;
        current_rule.tag2 = error.human_tag;
        /* BUG FIX: the loop bound was sizeof(contextual_rules) — a byte count —
           which produced trigger-function ids past the end of the table;
           iterate over the element count instead. */
        for(int ii = 0; ii < (int)(sizeof(contextual_rules)/sizeof(contextual_rules[0])); ii++){
            current_rule.triggerfn = ii;
            instantiate_rule(pattern, ii, &current_rule);
            int improvement = get_rule_error_improvement(corpus, current_rule, error);
            if(improvement>maximprovement){
                maximprovement = improvement;
                learned_rules[learned_rule_index].tag1 = current_rule.tag1;
                learned_rules[learned_rule_index].tag2 = current_rule.tag2;
                learned_rules[learned_rule_index].arg1 = current_rule.arg1;
                learned_rules[learned_rule_index].arg2 = current_rule.arg2;
                learned_rules[learned_rule_index].triggerfn = current_rule.triggerfn;
            }
        }
        /* Stop early when no remaining error is frequent enough to beat the
           best improvement found so far.
           BUG FIX: the last-element check previously compared i against the
           error's own frequency (errors->errors[i].number-1) and then read
           errors->errors[i+1] out of bounds; compare against the list length. */
        if(i == errors->length - 1 || maximprovement >= errors->errors[i+1].number)
            break;
    }
    /* BUG FIX: apply the rule that was just learned, *then* advance the index;
       the old order incremented first and applied the uninitialized next slot. */
    apply_rule_to_corpus(learned_rules[learned_rule_index], corpus);
    learned_rule_index++;
}
/*rule num is the index in the ruls array*/
/* rule_num is the index into the contextual_rules table. Fills rule->arg1 and
 * rule->arg2 with the context tags the corresponding trigger function tests;
 * single-argument triggers get arg2 = NONE. */
void instantiate_rule(pattern_t pattern, int rule_num, contextual_rule_t *rule){
    switch(rule_num){
    case PREV_TAG_IS:
        rule->arg1 = pattern.prevtag1;
        rule->arg2 = NONE;
        break;
    case NEXT_TAG_IS:
        rule->arg1 = pattern.nexttag1;
        rule->arg2 = NONE;
        break;
    case PREV_2_TAG_IS:
        rule->arg1 = pattern.prevtag2;
        rule->arg2 = NONE;
        break;
    case NEXT_2_TAG_IS:
        rule->arg1 = pattern.nexttag2;
        rule->arg2 = NONE;
        break;
    case PREV_1_OR_2_TAG_IS:
        /* whichever of prev1/prev2 occurred more often (ties favor prev1) */
        rule->arg1 = (pattern.prev1freq >= pattern.prev2freq)?pattern.prevtag1:pattern.prevtag2;
        rule->arg2 = NONE;
        break;
    case NEXT_1_OR_2_TAG_IS:
        rule->arg1 = (pattern.next1freq >= pattern.next2freq)?pattern.nexttag1:pattern.nexttag2;
        rule->arg2 = NONE;
        break;
    case PREV_1_OR_2_OR_3_TAG_IS: {
        /* BUG FIX: the old code compared the winning *tag* value against
           prev3freq and could assign a frequency to arg1; compare the
           frequencies and assign the corresponding tag instead. */
        int best12_tag = (pattern.prev1freq >= pattern.prev2freq) ? pattern.prevtag1 : pattern.prevtag2;
        rule->arg1 = (((pattern.prev1freq >= pattern.prev2freq) ? pattern.prev1freq : pattern.prev2freq)
                      >= pattern.prev3freq) ? best12_tag : pattern.prevtag3;
        rule->arg2 = NONE;
        break;
    }
    case NEXT_1_OR_2_OR_3_TAG_IS: {
        /* BUG FIX: same frequency-vs-tag confusion as the PREV case above. */
        int best12_tag = (pattern.next1freq >= pattern.next2freq) ? pattern.nexttag1 : pattern.nexttag2;
        rule->arg1 = (((pattern.next1freq >= pattern.next2freq) ? pattern.next1freq : pattern.next2freq)
                      >= pattern.next3freq) ? best12_tag : pattern.nexttag3;
        rule->arg2 = NONE;
        break;
    }
    case PREV_TAG_IS_X_AND_NEXT_TAG_IS_Y:
        rule->arg1 = pattern.prevtag1;
        rule->arg2 = pattern.nexttag1;
        break;
    case PREV_TAG_IS_X_AND_NEXT_2_TAG_IS_Y:
        rule->arg1 = pattern.prevtag1;
        rule->arg2 = pattern.nexttag2;
        break;
    case NEXT_TAG_IS_X_AND_PREV_2_TAG_IS_Y:
        rule->arg1 = pattern.nexttag1;
        rule->arg2 = pattern.prevtag2;
        break;
    case NEXT_TAG_IS_X_AND_NEXT_2_TAG_IS_Y:
        rule->arg1 = pattern.nexttag1;
        rule->arg2 = pattern.nexttag2;
        break;
    case PREV_TAG_IS_X_AND_PREV_2_TAG_IS_Y:
        rule->arg1 = pattern.prevtag1;
        rule->arg2 = pattern.prevtag2;
        break;
    default:
        /* unknown trigger id: leave rule->arg1/arg2 untouched */
        break;
    }
}
/* calculates the error improved by a rule */
/* Net benefit of a candidate rule: the number of error sites it would
 * correct, minus the number of currently-correct positions it would break. */
int get_rule_error_improvement(corpus_t corpus, contextual_rule_t rule, error_t error){
    /* Count error occurrences where the rule's trigger fires. */
    int fixed = 0;
    for(size_t idx = 0; idx < error.number; idx++){
        int *site = (int*)error.indices.elems[idx];
        if(check_contextual_rule(rule, *site, corpus)){
            fixed++;
        }
    }
    /* Count positions that are already tagged correctly but where the
       rule would also fire, introducing a new error. */
    int broken = 0;
    for(size_t pos = 0; pos < corpus.num_lines; pos++){
        bool already_right = (corpus.machine_tags[pos] == corpus.human_tags[pos]);
        if(already_right && check_contextual_rule(rule, pos, corpus)){
            broken++;
        }
    }
    return fixed - broken;
}
/* Builds a hashmap of tagging errors (machine tag != human tag), counting the
 * frequency of each distinct (human, machine) tag pair and recording every
 * corpus index where it occurs, then returns the errors sorted by frequency.
 * Positions flagged ignore_flag are skipped. */
sorted_error_list_t* error_frequencies(corpus_t corpus){
    printf("we are currently in this method\n");
    struct hashmap map;
    hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0);
    for(size_t i = 0; i < corpus.num_lines; i++){
        if(corpus.info[i].ignore_flag)
            continue;
        if(corpus.machine_tags[i] != corpus.human_tags[i]){
            /* BUG FIX: the old code declared `int* tempkey;` and wrote through
               it uninitialized — undefined behavior. Use a stack int instead.
               NOTE(review): the key is the *sum* of the two tags, so pairs
               (a,b) and (b,a) collide — TODO confirm this is intended. */
            int tempkey = corpus.human_tags[i] + corpus.machine_tags[i];
            error_t *er = error_hashmap_get(&map, &tempkey);
            /* heap copy of the index; ownership passes to the dynamic array */
            size_t *sizetptr = (size_t*)malloc(sizeof(size_t));
            if(er == NULL){
                /* First occurrence of this error pair: create its entry. */
                error_t *error = malloc (sizeof (struct error_t));
                int *key = malloc(sizeof(int)); /* was sizeof(int*) — oversized */
                *key = corpus.human_tags[i] + corpus.machine_tags[i];
                error->number = 1;
                initialize_dynamic_array(&(error->indices), ERROR_STARTING_LENGTH, sizeof(size_t*));
                *sizetptr = i;
                add_to_dynamic_array(&(error->indices), sizetptr);
                error->human_tag = corpus.human_tags[i];
                error->machine_tag = corpus.machine_tags[i];
                error_hashmap_put(&map, key, error);
            }
            else{
                /* Known pair: bump the frequency and record the index. */
                *sizetptr = i;
                er->number += 1;
                add_to_dynamic_array(&(er->indices), sizetptr);
            }
        }
    }
    return errors_sorted_by_frequency(map);
}
//Returns the errors ordered by frequency, highest first: cmpfunc sorts the
//frequencies in descending order, so index [0] holds the most frequent error
//and subsequent indices hold progressively less frequent ones.
//This avoids iterating through the hashmap n times to find each next-most-frequent error.
//Alternatively this could return an int* of keys in the correct order; either is easy to do.
/* Flattens the error hashmap into an array sorted by frequency, highest
 * first (cmpfunc sorts descending). The map is stashed in global_hashmap so
 * return_map() can hand it back later; the returned list copies the error_t
 * structs, whose indices arrays still alias the hashmap entries. The caller
 * owns the returned list and is responsible for freeing it and destroying
 * the hashmap. */
sorted_error_list_t* errors_sorted_by_frequency(hashmap_t map){
    int index = 0;
    int count = hashmap_size(&map);
    /* Every error's frequency; sorted descending below. */
    int* initial_order = malloc(count * sizeof(int));
    sorted_error_list_t* errors = malloc(sizeof(sorted_error_list_t));
    /* BUG FIX: zero-fill via calloc instead of memset — memset was called
       without <string.h> (implicit declaration). The placement loop below
       relies on .number == 0 marking a still-empty slot. */
    error_t* errors_ordered = calloc(count, sizeof(error_t));
    struct hashmap_iter *iter;
    /* First pass: collect each error's frequency into initial_order. */
    for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) {
        error_t *er = hashmap_iter_get_data(iter);
        initial_order[index] = er->number;
        index++;
    }
    /* Sort the COUNT frequencies descending (cmpfunc compares b against a). */
    qsort(initial_order, count, sizeof(int), cmpfunc);
    /* Second pass: place every error at the slot holding its frequency.
       Checking slot->number == 0 prevents overwriting an earlier placement;
       ties land in adjacent slots in arbitrary (iteration) order. */
    for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) {
        error_t *er = (error_t *)hashmap_iter_get_data(iter);
        for(int i = 0; i < count; i++){
            error_t *slot = &errors_ordered[i];
            if(er->number == initial_order[i] && slot->number == 0){
                errors_ordered[i] = *er;
                break;
            }
        }
    }
    errors->length = count;
    errors->errors = errors_ordered;
    global_hashmap = map; /* kept alive for return_map() */
    printf("Value of length is %zu\n", errors->length);
    free(initial_order);
    return errors;
}
/* Visits every occurrence of `error` in the corpus, gathers the machine tags
 * at offsets -3..+3 around each occurrence, and returns, for each offset, the
 * most frequent tag together with its frequency. Out-of-bounds neighbors are
 * recorded as tag 0. */
pattern_t find_patterns(corpus_t corpus, error_t error){
    size_t number = error.number;
    /* One int per occurrence for each context offset.
       (BUG FIX: allocations used sizeof(int *) per element; the arrays hold
       plain ints, so sizeof(int) is the correct element size.) */
    int* prev3 = malloc(sizeof(int) * number);
    int* prev2 = malloc(sizeof(int) * number);
    int* prev1 = malloc(sizeof(int) * number);
    int* next1 = malloc(sizeof(int) * number);
    int* next2 = malloc(sizeof(int) * number);
    int* next3 = malloc(sizeof(int) * number);
    /* go to index of each error and get prev and next tags */
    for(size_t i = 0; i < number; i++){
        /* NOTE(review): the stored indices are size_t* (see error_frequencies)
           but are read back through an int*, which only yields the right value
           on little-endian targets — TODO unify the types. */
        int prev3i = *((int *)(error.indices.elems[i])) -3;
        int prev2i = *((int *)(error.indices.elems[i])) -2;
        int prev1i = *((int *)(error.indices.elems[i])) -1;
        int next1i = *((int *)(error.indices.elems[i])) +1;
        int next2i = *((int *)(error.indices.elems[i])) +2;
        int next3i = *((int *)(error.indices.elems[i])) +3;
        /* NOTE(review): the boundary checks index corpus.info with the loop
           counter i rather than the corpus position of the occurrence — looks
           suspicious; confirm against the corpus layout before changing. */
        prev3[i] = (corpus.info[i].prev_bound<=-3)?corpus.machine_tags[prev3i]:0;
        prev2[i] = (corpus.info[i].prev_bound<=-2)?corpus.machine_tags[prev2i]:0;
        prev1[i] = (corpus.info[i].prev_bound<=-1)?corpus.machine_tags[prev1i]:0;
        next1[i] = (corpus.info[i].next_bound>=1)?corpus.machine_tags[next1i]:0;
        next2[i] = (corpus.info[i].next_bound>=2)?corpus.machine_tags[next2i]:0;
        next3[i] = (corpus.info[i].next_bound>=3)?corpus.machine_tags[next3i]:0;
    }
    /* BUG FIX: the pattern was heap-allocated, returned by value, and the
       allocation leaked on every call; build it on the stack instead. */
    pattern_t pattern;
    size_t frequency;
    pattern.prevtag3 = find_most_frequent(prev3, &frequency, number);
    pattern.prev3freq = frequency;
    pattern.prevtag2 = find_most_frequent(prev2, &frequency, number);
    pattern.prev2freq = frequency;
    pattern.prevtag1 = find_most_frequent(prev1, &frequency, number);
    pattern.prev1freq = frequency;
    pattern.nexttag1 = find_most_frequent(next1, &frequency, number);
    pattern.next1freq = frequency;
    pattern.nexttag2 = find_most_frequent(next2, &frequency, number);
    pattern.next2freq = frequency;
    pattern.nexttag3 = find_most_frequent(next3, &frequency, number);
    pattern.next3freq = frequency;
    free(prev3);
    free(prev2);
    free(prev1);
    free(next1);
    free(next2);
    free(next3);
    printf("Done parsing pattern\n");
    return pattern;
}
//Helper method for qsort
/* qsort comparator for ints, descending order.
 * BUG FIX: the old `*(int*)b - *(int*)a` overflows for operands of large
 * magnitude (e.g. INT_MIN vs a positive value), which is undefined behavior;
 * use the overflow-safe comparison idiom instead. */
int cmpfunc (const void * a, const void * b) {
    int x = *(const int*)a;
    int y = *(const int*)b;
    return (y > x) - (y < x);
}
//Helper method for finding most frequent tag in the surround tags
/* Returns the value occurring most often in values[0..size-1] and stores its
 * duplicate count (occurrences of that value excluding the element itself)
 * in *frequency. O(n^2) pairwise scan.
 * BUG FIX: the loop previously ran under `#pragma omp parallel for` while
 * count/tag/highest/most_frequent were all shared — a data race producing
 * nondeterministic results; the loop is now serial.
 * BUG FIX: most_frequent was read uninitialized when size == 0; it now
 * defaults to 0 and *frequency is 0 in that case. */
int find_most_frequent(int* values, size_t *frequency, size_t size){
    int highest = -1;       // best duplicate count seen so far
    int most_frequent = 0;  // value with that count (0 if size == 0)
    for(size_t i = 0; i < size; i++){
        if(i%10000 == 0) printf("Currently at index: %zu\n", i);
        int tag = values[i];
        int count = 0;
        for(size_t j = 0; j < size; j++){
            if(j != i && values[j] == tag) // no point checking against itself
                count++;
        }
        if(count > highest)
        {
            highest = count;
            most_frequent = tag;
        }
    }
    *frequency = (highest < 0) ? 0 : (size_t)highest;
    printf("returning most frequent: %d\n", most_frequent);
    return most_frequent;
}
/* Returns the hashmap most recently stored by errors_sorted_by_frequency().
 * NOTE(review): this is a struct copy of the file-scope global; entries it
 * references are shared with the sorted error list — confirm intended
 * ownership before freeing either. */
hashmap_t return_map(){
return global_hashmap;
}
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in place, in which case C or
// A are modified in place. If the matrix to be transposed has more than one
// vector, it may have jumbled indices in its vectors, which must be sorted.
// If the input matrix has a single vector, it must be already sorted on input.
// The input matrix may have shallow components (even if in place), and the
// output may also have shallow components (even in the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// If A_in is NULL, then C = (*Chandle) is transposed in place. If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in place.
// If A_in is not NULL and Chandle is NULL, then A is modified in place, and
// the A_in matrix is not freed when done.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. For many matrices, e is O(n),
// although the constant can be high. The qsort method is more scalable, but
// not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
const GrB_UnaryOp op_in, // optional operator to apply to the values
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in place
//--------------------------------------------------------------------------
bool in_place_C, in_place_A ;
GrB_Matrix A, C ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// C = C' ; &C is transposed in place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in place
in_place_A = false ;
ASSERT (A == C && A == (*Chandle)) ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// A = A' ; A is transposed in place; reuse the header of A
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in place
ASSERT (A == C && A == (*Chandle)) ;
}
else
{
//----------------------------------------------------------------------
// C = A' ; C and A are different
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL.
A = A_in ;
C = NULL ;
(*Chandle) = NULL ; // C must be allocated; freed on error
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && A != (*Chandle)) ;
}
bool in_place = (in_place_A || in_place_C) ;
ASSERT_OK_OR_JUMBLED (GB_check (A, "A input for GB_transpose", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (ctype, "ctype for GB_transpose", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (op_in, "op for GB_transpose", GB0)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use here
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t aplen = A->plen ;
bool A_is_hyper = A->is_hyper ;
double A_hyper_ratio = A->hyper_ratio ;
int64_t anzmax = A->nzmax ;
// if in place, these must be freed when done, whether successful or not
int64_t *restrict Ap = A->p ;
int64_t *restrict Ah = A->h ;
int64_t *restrict Ai = A->i ;
GB_void *restrict Ax = A->x ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
// free prior content of A, if transpose is done in place
#define GB_FREE_IN_PLACE_A \
{ \
if (in_place) \
{ \
/* A is being transposed in placed */ \
/* free prior content of A but not &A itself */ \
if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t)) ;\
if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen , sizeof (int64_t)) ;\
if (!Ai_shallow) GB_FREE_MEMORY (Ai, anzmax , sizeof (int64_t)) ;\
if (!Ax_shallow) GB_FREE_MEMORY (Ax, anzmax , asize) ; \
} \
else \
{ \
/* A is not modified; it is purely an input matrix */ \
; \
} \
}
// free the new C matrix, unless C=A' is being done in place of A
#define GB_FREE_C \
{ \
if (!in_place_A) \
{ \
/* free all of C and all its contents &C */ \
GB_MATRIX_FREE (Chandle) ; \
} \
}
// free both A (if in place) and C (if not in place of A)
#define GB_FREE_A_AND_C \
{ \
GB_FREE_IN_PLACE_A ; \
GB_FREE_C ; \
}
//--------------------------------------------------------------------------
// determine the type of C and get the unary operator
//--------------------------------------------------------------------------
GrB_UnaryOp op ;
if (op_in == NULL)
{
// no operator
op = NULL ;
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
else
{
// If a unary operator z=op(x) is present, C is always returned as
// op->ztype. The input ctype is ignored.
if (op_in->opcode == GB_IDENTITY_opcode && atype == op_in->xtype)
{
// op is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast.
ASSERT (op_in->ztype == op_in->xtype) ;
op = NULL ;
ctype = atype ;
}
else
{
// apply the operator, z=op(x)
op = op_in ;
ctype = op->ztype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
bool allocate_new_Cx = (ctype != atype) || (op != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
GB_CREATE (Chandle, ctype, avdim, avlen, GB_Ap_calloc,
C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_OK (GB_check (*Chandle, "C transpose empty", GB0)) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be already sorted on input
ASSERT_OK (GB_check (A, "the vector A must already be sorted", GB0)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
GB_NEW (Chandle, ctype, 1, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *restrict Cx = NULL ;
int64_t *restrict Cp ;
int64_t *restrict Ci ;
GB_MALLOC_MEMORY (Cp, anz+1, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (Ci, anz , sizeof (int64_t)) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
GB_MALLOC_MEMORY (Cx, anz, ctype->size) ;
}
if (Cp == NULL || Ci == NULL || (allocate_new_Cx && (Cx == NULL)))
{
// out of memory
GB_FREE_MEMORY (Cp, anz+1, sizeof (int64_t)) ;
GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ;
GB_FREE_MEMORY (Cx, anz , csize) ;
GB_FREE_A_AND_C ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// the transpose will now succeed; fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op (Cx, op, Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
C->h = Ai ; C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
C->nzmax = anz ;
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ; C->i_shallow = false ;
C->p = Cp ; C->p_shallow = false ;
// fill the vector pointers C->p
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k <= anz ; k++)
{
Cp [k] = k ;
}
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_OK (GB_check (A, "1-by-n input A already sorted", GB0)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is NON-hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
GB_NEW (Chandle, ctype, avdim, 1, GB_Ap_null, C_is_csc,
GB_FORCE_NONHYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *restrict Cx = NULL ;
int64_t *restrict Cp ;
int64_t *restrict Ci = NULL ;
GB_CALLOC_MEMORY (Cp, 2, sizeof (int64_t)) ;
bool allocate_new_Ci = (!A_is_hyper) ;
if (allocate_new_Ci)
{
// A is not hypersparse, so new space is needed for Ci
GB_MALLOC_MEMORY (Ci, anz, sizeof (int64_t)) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
GB_MALLOC_MEMORY (Cx, anz, ctype->size) ;
}
if (Cp == NULL || (allocate_new_Cx && (Cx == NULL))
|| (allocate_new_Ci && (Ci == NULL)))
{
// out of memory
GB_FREE_MEMORY (Cp, 2 , sizeof (int64_t)) ;
GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ;
GB_FREE_MEMORY (Cx, anz , csize) ;
GB_FREE_A_AND_C ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
if (op != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op (Cx, op, Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
ASSERT (!allocate_new_Ci) ;
C->i = Ah ; C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (allocate_new_Ci) ;
ASSERT (Ah == NULL) ;
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel
//--------------------------------------------------------------
int ntasks = (nth == 1) ? 1 : (8 * nth) ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
int64_t Count [ntasks+1] ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ; C->i_shallow = false ;
}
//----------------------------------------------------------------------
// vector pointers of C
//----------------------------------------------------------------------
C->nzmax = anz ;
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ; C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else
{
//======================================================================
// transpose a general matrix
//======================================================================
ASSERT_OK_OR_JUMBLED (GB_check (A, "A GB_transpose jumbled ok", GB0)) ;
ASSERT (avdim > 1 && avlen > 1) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// for the qsort method, if the transpose is done in place and A->i is
// not shallow, A->i can be used and then freed. Otherwise, A->i is
// not modified at all.
bool recycle_Ai = (in_place && !Ai_shallow) ;
bool use_qsort ;
if (A_is_hyper)
{
//------------------------------------------------------------------
// always use qsort for hypersparse matrices
//------------------------------------------------------------------
use_qsort = true ;
}
else
{
//------------------------------------------------------------------
// select qsort if the transpose will likely be hypersparse
//------------------------------------------------------------------
use_qsort = GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, avlen) ;
}
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_qsort)
{
//==================================================================
// transpose via quicksort
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork ;
GB_MALLOC_MEMORY (iwork, anz, sizeof (int64_t)) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
return (GB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
GB_extract_vector_list (iwork, A, nthreads) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
GB_NEW (Chandle, ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ;
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t));
if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen , sizeof (int64_t));
}
int64_t *jwork = NULL ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ;
if (!recycle_Ai)
{
// allocate jwork of size anz
GB_MALLOC_MEMORY (jwork, anz, sizeof (int64_t)) ;
}
if (op != NULL)
{
// allocate Swork of size anz * csize
GB_MALLOC_MEMORY (Swork, anz, csize) ;
}
if ((!recycle_Ai && (jwork == NULL))
|| ((op != NULL) && (Swork == NULL)))
{
// out of memory
GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ;
GB_FREE_MEMORY (jwork, anz, sizeof (int64_t)) ;
GB_FREE_MEMORY (Swork, anz, csize) ;
GB_FREE_A_AND_C ;
return (GB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op != NULL)
{
// Swork = op ((op->xtype) Ax)
GB_apply_op (Swork, op, Ax, atype, anz, Context) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE_MEMORY (Ax, anzmax , asize) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
GrB_Matrix T ;
info = GB_builder
(
&T, // create T
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&jwork, // jwork_handle, freed on output
&Swork, // Swork_handle, freed on output
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
false, // ijcheck: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_A_AND_C ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
ASSERT (!A_is_hyper) ;
// T is also typecasted to ctype, if not NULL
GrB_Matrix T ;
info = GB_transpose_bucket (&T, ctype, C_is_csc, A, op, Context) ;
// free prior content, if C=A' is being done in place
if (in_place_A)
{
// free all content of A, but not the header, if in place of A
GB_PHIX_FREE (A) ; // transpose in-place
}
else if (in_place_C)
{
// free all of C, including the header, if done in place of C
GB_MATRIX_FREE (Chandle) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
return (info) ;
}
ASSERT_OK (GB_check (T, "T from bucket", GB0)) ;
if (in_place_A)
{
// The header of A has not been freed, since it is used for the
// output. Transplant T back into A and free T. T is not
// shallow and no typecast is done so this will always succeed.
info = GB_transplant (A, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// If C=A' is done in place of C, then the header and content
// of the input C has been freed. The output T can now be
// moved to the Chandle.
ASSERT (*Chandle == NULL) ;
(*Chandle) = T ;
}
}
}
//--------------------------------------------------------------------------
// conform the result to the desired hypersparsity of A
//--------------------------------------------------------------------------
// get the output matrix
C = (*Chandle) ;
// transplant the hyper_ratio from A to C
C->hyper_ratio = A_hyper_ratio ;
ASSERT_OK (GB_check (C, "C to conform in GB_transpose", GB0)) ;
info = GB_to_hyper_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_OK (GB_check (*Chandle, "Chandle conformed in GB_transpose", GB0)) ;
return (GrB_SUCCESS) ;
}
|
simpleFlush.c | int main() {
// Minimal OpenMP flush test case.  Each inner block declares its own `x`,
// shadowing the outer one; none of the variables is ever used.
int x;
{
int x;
// A flush outside a parallel region binds to the (single) initial thread,
// so it has no observable effect -- this only exercises the directive.
#pragma omp flush
}
{
int x;
#pragma omp flush
}
}
|
GB_unop__exp_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp_fp32_fp32)
// op(A') function: GB (_unop_tran__exp_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = expf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = expf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = expf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator cij = expf (aij), where
// both C and A are float.  Handles the full case (Ab == NULL) and the
// bitmap case (Ab is A->b, already memcpy'd into C->b by the caller).
GrB_Info GB (_unop_apply__exp_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case: only touch entries whose bitmap bit is set
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            Cx [k] = expf (Ax [k]) ;
        }
    }
    else
    {
        // dense/sparse case: apply the op to every value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = expf (Ax [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply cij = expf (aij).
// All of the work is done by the included transpose template, driven by the
// GB_CAST_OP / GB_OP macros defined above.
GrB_Info GB (_unop_tran__exp_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp.c |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#define PACKET_COUNT 10000
#define RESOLUTION 10000000
typedef struct {
void *data;
} t_data;
typedef struct {
void *result;
} t_result;
// Return 1 if p is prime, 0 otherwise (trial division).
// BUG FIX: the old loop started at i = 3, so every even number (4, 8, ...)
// and every p < 2 (0, 1, negatives) was wrongly reported prime -- which let
// goldbach() accept decompositions such as 6 = 2 + 4.  Also avoids
// re-evaluating sqrt(p) on every iteration by testing i*i <= p instead.
int is_prime(const int p)
{
    if (p < 2)
    {
        return 0;
    }
    for (int i = 2; (long long)i * i <= p; i++)
    {
        if (p % i == 0)
        {
            return 0;
        }
    }
    return 1;
}
// Search for a Goldbach decomposition number = i + j with both i and j
// prime, scanning i upward from 2 and j downward from number-2.  Returns 1
// on success (printing the first decomposition found when `first` is set),
// and 0 if no decomposition with j > 2 exists.
int goldbach(int number, int rank, int first)
{
    int j = number - 2;
    for (int i = 2; j > 2; i++, j--)
    {
        if (is_prime(i) == 1 && is_prime(j) == 1)
        {
            if (first)
            {
                printf("[Rank %d] The first sum is %d + %d = %d \n", rank, i, j, number);
            }
            return 1;
        }
    }
    return 0;
}
// Thread entry point: verify the Goldbach property for every number in
// [range_start, range_end] and report the aggregate outcome (1 = all hold)
// to the owning rank via MPI_Send.  The decomposition is printed only for
// the first number of the range.
// BUG FIX: the function previously fell off the end without returning a
// value, which is undefined behavior if the caller ever uses the result.
void *goldbach_range(void *args)
{
    thargs_t arguments = *((thargs_t *)args);
    int result = 1;
    for (int i = arguments.range_start; i <= arguments.range_end; i++)
    {
        if (goldbach(i, arguments.rank, i == arguments.range_start) == 0)
        {
            result = 0;
        }
    }
    MPI_Send(&result, 1, MPI_INT, arguments.rank, RESULT, MPI_COMM_WORLD);
    // result travels via MPI, not the return value
    return NULL;
}
// Current wall-clock time in seconds, with microsecond resolution,
// as a double (gettimeofday-based).
double dtime()
{
    struct timeval now;
    gettimeofday(&now, (struct timezone *)0);
    return (double)(now.tv_sec + now.tv_usec * 1.0e-6);
}
// Unused stub: packet partitioning is actually done in generate_data().
// BUG FIX: the empty body fell off the end of a non-void function, which is
// undefined behavior if the result is used; return NULL explicitly.
t_data *partition(t_data inputdata, int partition_count)
{
    (void)inputdata;
    (void)partition_count;
    return NULL;
}
double f(double x) {
return sin(x)*sin(x)/x;
}
// Single-interval trapezoid rule estimate of the integral of f() over
// [start, end]: width times the mean of the endpoint values.
double one_shot_integration(double start, double end) {
    double width = end - start;
    double endpoint_sum = f(start) + f(end);
    return width * endpoint_sum / 2;
}
// Adaptive trapezoid quadrature of f() over [start, end]: compare the
// one-interval estimate against the two-half refinement and recurse while
// their difference exceeds 1e-9.  The first levels (k < 3) evaluate the two
// halves in parallel OpenMP sections; deeper levels recurse serially.
double adaptive_integration(double start,double end,int k) {
// first check if we need to go any deeper
double middle=(start+end)/2;
double a1,a2,a;
double r;
int threadid;
a=one_shot_integration(start,end);
a1=one_shot_integration(start,middle);
a2=one_shot_integration(middle,end);
if (k<3) {
if (fabs(a-a1-a2)>0.000000001) { // go deeper
// Each section assigns its private reduction copy of r; reduction(+:r)
// then sums the two halves (plus the initial 0) into the shared r.
r=0;
#pragma omp parallel sections firstprivate(start,middle,end,k) reduction(+:r) num_threads(2)
{
#pragma omp section
{
r=adaptive_integration(start,middle,k+1);
}
#pragma omp section
{
r=adaptive_integration(middle,end,k+1);
}
}
return r;
}
// tolerance already met: fall through and return the coarse estimate `a`
} else {
if (fabs(a-a1-a2)>0.000000001) { // go deeper
a1=adaptive_integration(start,middle,k+1);
a2=adaptive_integration(middle,end,k+1);
a=a1+a2;
}
}
return a;
}
// Integrate f() over the range [data.data[0], data.data[1]] using the
// adaptive quadrature in adaptive_integration(), and store the value
// through result.result.  Both .data and .result are assumed to point at
// doubles (as set up by generate_data()/allocate_results()).
// Cleanup: removed unused locals (pivot, r_length, x, i, t_start, t_stop)
// and the dangling dtime() call whose result was never read.
void process(t_data data, t_result result) {
    double r_a = ((double *)data.data)[0];
    double r_b = ((double *)data.data)[1];
    double integrate = adaptive_integration(r_a, r_b, 0);
    *((double *)result.result) = integrate; // store the result
}
t_result *allocate_results(int *packet_count) {
int i;
// prepare space for results
t_result *r_results=(t_result *)malloc(sizeof(t_result)*PACKET_COUNT);
if (r_results==NULL) {
perror("Not enough memory");
exit(-1);
}
for(i=0;i<PACKET_COUNT;i++) {
r_results[i].result=malloc(sizeof(double));
if (r_results[i].result==NULL) {
perror("Not enough memory");
exit(-1);
}
}
*packet_count=PACKET_COUNT;
return r_results;
}
// Build PACKET_COUNT input packets, each holding a [r_a, r_b] sub-range of
// the overall interval; writes PACKET_COUNT through packet_count.  Aborts
// on allocation failure.
// NOTE(review): a=40000000 > b=5000000, so packet_size is negative and the
// sub-ranges step downward from a -- confirm whether b was meant to be
// 50000000.
t_data *generate_data(int *packet_count) {
// prepare the input data
// i.e. the given range is to be divided into packets
int i;
double a=40000000,b=5000000;
double packet_size=(b-a)/PACKET_COUNT;
t_data *p_packets;
double *p_data;
double r_a,r_b;
// prepare PACKET_COUNT number of packets
t_data *packets=(t_data *)malloc(sizeof(t_data)*PACKET_COUNT);
if (packets==NULL) {
perror("Not enough memory");
exit(-1);
}
r_a=a;
r_b=a+packet_size;
p_packets=packets; // pointer to the beginning of the packets
for(i=0;i<PACKET_COUNT;i++) {
packets[i].data=malloc(2*sizeof(double));
if (packets[i].data==NULL) {
perror("Not enough memory");
exit(-1);
}
// populate the packet with the data
p_data=(double *)packets[i].data;
*p_data=r_a;
*(p_data+1)=r_b;
r_a+=packet_size;
r_b+=packet_size;
}
*packet_count=PACKET_COUNT;
return packets;
}
t_data *data;
t_result *results;
// Driver: up to 4 timed runs.  Each run generates PACKET_COUNT packets,
// has a 60-thread OpenMP team pull packet indices from a shared counter
// (guarded by a critical section) and integrate them with process(), then
// sums the per-packet results and tracks the minimum wall-clock time.
// BUG FIX: `main` had an implicit int return type (invalid since C99) and
// never returned a value; declared int and return 0 added.
int main(int argc, char **argv) {
    int counter;        // next unclaimed packet index (shared)
    int my_data;        // packet index claimed by this thread (private)
    double result;
    int i;
    int packet_count;
    int results_count;
    int threadid;
    int threadclass; // for using various critical sections
    double t_start, t_stop, t_total, t_current, t_min;
    int t_counter = 0;
    t_min = 100000000;
    do {
        data = generate_data(&packet_count);
        results = allocate_results(&results_count);
        counter = 0;
        t_total = 0;
        t_start = dtime();
        omp_set_nested(1);
        omp_set_dynamic(0);
        #pragma omp parallel private(my_data) firstprivate(threadid,threadclass) shared(counter) num_threads(60)
        {
            threadid = omp_get_thread_num();
            do {
                // each thread will try to get its data from the available list
                #pragma omp critical
                {
                    my_data = counter;
                    counter++; // also write result to the counter
                }
                // process and store result -- this can be done without
                // synchronization; processing may take various times for
                // various data packets
                if (my_data < PACKET_COUNT)
                    process(data[my_data], results[my_data]);
            } while (my_data < PACKET_COUNT); // otherwise simply exit because
            // there are no more data packets to process
        }
        t_stop = dtime();
        // NOTE(review): this barrier is outside any parallel region, so it
        // binds to the sequential part and has no effect.
        #pragma omp barrier
        // now just add results
        result = 0;
        for (i = 0; i < PACKET_COUNT; i++) {
            result += *((double *)results[i].result);
        }
        t_counter++;
        t_current = t_stop - t_start;
        if (t_current < t_min)
            t_min = t_current;
    } while ((t_counter < 4) && (t_current < 100));
    printf("\nFinished");
    printf("\nThe total value of the integrate is %.5f\n", result);
    printf("\nTotal time elapsed=%.8f\n", t_min);
    return 0;
}
|
GB_binop__second_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int32)
// A*D function (colscale): GB (_AxD__second_int32)
// D*A function (rowscale): GB (_DxB__second_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int32_t
// A type: int32_t
// A pattern? 1
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The numeric
// loop lives in the included template and applies GB_BINOP (z = y for the
// SECOND operator).
void GB (_Cdense_ewise3_noaccum__second_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, sliced by
// B_ek_slicing.  With the SECOND operator, GB_BINOP is z = y, so each cij
// with a corresponding bij is overwritten by bij.
GrB_Info GB (_Cdense_accumB__second_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__second_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first
// (harmless artifact of the code generator).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with the
// SECOND_INT32 multiplier. A_ek_slicing describes the parallel partition
// of A.
GrB_Info GB (_AxD__second_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the numeric array of C, written by the template.
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with the
// SECOND_INT32 multiplier.
GrB_Info GB (_DxB__second_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the numeric array of C, written by the template.
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the SECOND_INT32 operator.
// Also implements eWiseUnion, in which entries present in only one input
// are combined with the alpha/beta scalars instead of being copied.
GrB_Info GB (_AaddB__second_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// the scalars are only read for eWiseUnion, so they may be left
// uninitialized for plain eWiseAdd
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C
// is sparse or hypersparse, with the SECOND_INT32 operator. The C_to_*
// arrays map vectors of C to the corresponding vectors of M, A, and B.
GrB_Info GB (_AemultB_08__second_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full, with the SECOND_INT32 operator. flipxy requests the flipped
// application of the operator; whether it must be handled here depends on
// GB_BINOP_FLIP, defined earlier in this generated file.
GrB_Info GB (_AemultB_02__second_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, with the SECOND_INT32 operator. The M_ek_slicing
// arrays partition M for parallel execution.
GrB_Info GB (_AemultB_04__second_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap,
// with the SECOND_INT32 operator.
GrB_Info GB (_AemultB_bitmap__second_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
cpu_stream.h | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#define ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#include <memory>

#include "oneflow/core/ep/include/stream.h"
#include "oneflow/core/ep/cpu/cpu_device.h"
#define OF_RUNTIME_SEQ 0u
#define OF_RUNTIME_OMP 1u
#define OF_RUNTIME_TBB 2u
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
#include <omp.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/global_control.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
#ifdef WITH_ONEDNN
#include <oneapi/dnnl/dnnl.hpp>
#endif
namespace oneflow {
namespace ep {
class CpuNumThreadsGuard {
public:
OF_DISALLOW_COPY_AND_MOVE(CpuNumThreadsGuard);
explicit CpuNumThreadsGuard(size_t num_threads) : set_num_threads_(num_threads) {
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
saved_num_threads_ = omp_get_max_threads();
omp_set_num_threads(set_num_threads_);
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
saved_num_threads_ =
tbb::global_control::active_value(tbb::global_control::max_allowed_parallelism);
if (set_num_threads_ != saved_num_threads_) {
tbb::global_control global_thread_limit(tbb::global_control::max_allowed_parallelism,
set_num_threads_);
}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
}
~CpuNumThreadsGuard() {
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
omp_set_num_threads(saved_num_threads_);
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
if (set_num_threads_ != saved_num_threads_) {
tbb::global_control global_thread_limit(tbb::global_control::max_allowed_parallelism,
saved_num_threads_);
}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
}
private:
size_t set_num_threads_;
size_t saved_num_threads_;
};
// CPU execution stream. Provides a ParallelFor primitive whose backend is
// selected at compile time (OpenMP, TBB, or sequential), and optionally owns
// a oneDNN engine/stream pair when built WITH_ONEDNN.
class CpuStream : public Stream {
 public:
  OF_DISALLOW_COPY_AND_MOVE(CpuStream);
  explicit CpuStream(Device* device) : device_(device) {
#ifdef WITH_ONEDNN
    // One CPU engine (index 0) and one stream on it, owned by this object.
    onednn_engine_.reset(new dnnl::engine(dnnl::engine::kind::cpu, 0));
    onednn_stream_.reset(new dnnl::stream(*onednn_engine_));
#endif
  }
  ~CpuStream() override = default;
  DeviceType device_type() const override;
  Device* device() const override;
  Maybe<void> Sync() override;
  void RecordEvent(Event* event) override;
  // Runs func over [begin, end) in parallel with the default grain size.
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func) {
    ParallelFor(begin, end, func, kParallelForDefaultGrain);
  }
  // Runs func(sub_begin, sub_end) over disjoint chunks covering [begin, end).
  // grain_size bounds how finely the range is split: the thread count (OMP)
  // or chunk size (TBB) is chosen so each chunk has roughly >= grain_size
  // iterations. func must be safe to invoke concurrently from worker threads.
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func, size_t grain_size) {
#if OF_CPU_THREADING_RUNTIME != OF_RUNTIME_SEQ
    auto DivUp = [](int64_t x, int64_t y) { return (x + y - 1) / y; };
    size_t num_threads = dynamic_cast<CpuDevice*>(device())->GetNumThreads();
#endif
    if (begin >= end) { return; }
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
    if (grain_size > 0) {
      // Never use more threads than there are grains of work.
      num_threads = std::min(num_threads, (size_t)(DivUp((end - begin), grain_size)));
    } else {
      // grain_size == 0: run single-threaded.
      num_threads = 1;
    }
#pragma omp parallel num_threads(num_threads)
    {
      // Each OpenMP thread computes its own contiguous chunk of the range.
      int64_t omp_num_thread = omp_get_num_threads();
      int64_t chunk_size = DivUp((end - begin), omp_num_thread);
      int64_t omp_tid = omp_get_thread_num();
      int64_t thread_begin_index = begin + omp_tid * chunk_size;
      int64_t thread_end_index = std::min(end, chunk_size + thread_begin_index);
      if (thread_begin_index < end) { func(thread_begin_index, thread_end_index); }
    }
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
    CpuNumThreadsGuard guard(num_threads);
    // Chunks are at least grain_size iterations, statically partitioned.
    size_t tmp_chunk_size = DivUp((end - begin), num_threads);
    int64_t chunk_size = std::max(tmp_chunk_size, grain_size);
    tbb::parallel_for(
        tbb::blocked_range<int64_t>(begin, end, chunk_size),
        [func](const tbb::blocked_range<int64_t>& r) { func(r.begin(), r.end()); },
        tbb::static_partitioner{});
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
    func(begin, end);
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
  }
#ifdef WITH_ONEDNN
  dnnl::engine* onednn_engine() const { return onednn_engine_.get(); }
  dnnl::stream* onednn_stream() const { return onednn_stream_.get(); }
#endif

 private:
#ifdef WITH_ONEDNN
  std::unique_ptr<dnnl::engine> onednn_engine_;
  std::unique_ptr<dnnl::stream> onednn_stream_;
#endif
  Device* device_;  // not owned
  // Default minimum number of iterations per parallel chunk.
  static constexpr size_t kParallelForDefaultGrain = 32768;
};
} // namespace ep
} // namespace oneflow
#endif // ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
|
symv_c_coo_n_lo_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/kernel_plain.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// y = alpha * A * x + beta * y for a symmetric matrix stored as the lower
// triangle of a COO matrix. The per-entry scaling uses alpha_mul_3c, which
// (per its name) presumably applies a conjugation to the stored value —
// TODO confirm against the alphasparse macro definitions.
// Each thread accumulates into a private length-m scratch vector (tmp[tid])
// so the scatter of off-diagonal contributions needs no atomics; the scratch
// vectors are reduced into y afterwards.
static alphasparse_status_t
symv_coo_n_lo_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_COO *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
// NOTE(review): n is unused below; the kernel assumes a square matrix.
const ALPHA_INT n = A->cols;
const ALPHA_INT nnz = A->nnz;
const ALPHA_INT thread_num = alpha_get_thread_num();
// NOTE(review): these malloc results are not checked for NULL.
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < nnz; i++)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT r = A->row_indx[i];
const ALPHA_INT c = A->col_indx[i];
// Entries strictly above the diagonal are ignored: only the lower
// triangle is stored/used.
if (r < c)
{
continue;
}
ALPHA_Number v;
// v = alpha * A->values[i] (conjugated variant of the multiply)
alpha_mul_3c(v, alpha, A->values[i]);
if (r == c)
{
// diagonal entry contributes once
alpha_madde(tmp[threadId][r], v, x[c]);
}
else
{
// off-diagonal entry contributes to both row r and (by symmetry) row c
alpha_madde(tmp[threadId][r], v, x[c]);
alpha_madde(tmp[threadId][c], v, x[r]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < m; ++i)
{
// y[i] = beta * y[i] + sum over all threads' partial results
alpha_mul(y[i], beta, y[i]);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (int i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
// Public entry point: y = alpha * A * x + beta * y for a symmetric COO
// matrix stored by its lower triangle (conjugated variant). Simply forwards
// to the OpenMP kernel above, which resolves the thread count itself.
// Fix: removed an unused local (thread_num) that was computed here but never
// referenced.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_COO *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return symv_coo_n_lo_omp(alpha, A, x, beta, y);
}
|
GB_unop__sqrt_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fp64_fp64)
// op(A') function: GB (_unop_tran__sqrt_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sqrt (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sqrt (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sqrt (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = sqrt (Ax [p]) for all entries p: apply the SQRT unary operator
// to a double array, in parallel. When Ab is non-NULL, A is bitmap and only
// positions with Ab [p] != 0 hold entries; the rest are skipped (C->b has
// already been copied from A->b by the caller).
GrB_Info GB (_unop_apply__sqrt_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            // in-place safe: each slot is read before it is written
            Cx [k] = sqrt (Ax [k]) ;
        }
    }
    else
    {
        // dense case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = sqrt (Ax [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = sqrt (A'): transpose A and apply the SQRT operator, via the shared
// transpose template. Workspaces/A_slice carry the bucket workspace and row
// partition prepared by the caller.
GrB_Info GB (_unop_tran__sqrt_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination: C_rowmajor = A*B is computed as the column-major
// product B^T_cm * A^T_cm, so only the column-major kernel below needs a
// real implementation. Note the swapped scalar/storage-order/conjugation
// template arguments and the swapped lhs/rhs, rows/cols in the forward.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Column-major destination: cache-blocked GEMM following Goto's algorithm.
// The depth (K) dimension is split into panels of size kc; each K-panel of
// the lhs is packed into blockA and each kc x nc block of the rhs into
// blockB, then the gebp micro-kernel accumulates res += alpha * A' * B'.
// When `info` is non-null this is the OpenMP path, where the threads
// cooperatively pack slices of A' and synchronize through the lock-free
// users/sync counters in GemmParallelInfo.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
// blockA is shared between threads (pre-allocated by initParallelSession);
// each thread packs and owns the A'_i slice given by info[tid].lhs_start.
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
// Start from our own slice and walk round-robin so threads begin on
// different slices and contend less.
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
#endif
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// If the whole rhs fits in one kc x nc block, pack it only once (on the
// first sweep over the rows of the lhs).
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Callable wrapper binding the operands, destination, scaling factor, and
// blocking object of one GEMM call, so the parallelizer can invoke slices of
// the product as operator()(row, rows, col, cols, info).
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Called once before a parallel run: size the blocking for num_threads and
// pre-allocate the shared packed-lhs buffer.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Compute the [row, row+rows) x [col, col+cols) slice of the product;
// cols == -1 means "all remaining columns".
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the cache-blocking parameters (mc, nc, kc) and the
// packed-operand buffers (blockA, blockB) shared by all GEMM blocking
// strategies. Derived gemm_blocking_space classes fill in the members.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA; // packed lhs panel (may be null until allocated)
RhsScalar* m_blockB; // packed rhs panel (may be null until allocated)
Index m_mc; // block size along the M (rows) direction
Index m_nc; // block size along the N (cols) direction
Index m_kc; // block size along the K (depth) direction
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
// Blocking for products whose maximal sizes are known at compile time:
// the packed buffers live inside the object (static storage, no heap),
// sized from MaxRows/MaxCols/MaxDepth. For a row-major destination the
// roles of lhs/rhs (and rows/cols) are swapped via Transpose.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// Over-allocate raw bytes and align the pointers manually below.
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round the raw buffers up to the default alignment boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// Allocation is static, so the dynamic-allocation hooks are no-ops here.
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Blocking for dynamically-sized products: block sizes are computed at run
// time from the cache hierarchy (computeProductBlockingSizes) and the packed
// buffers are lazily heap-allocated, then freed in the destructor. For a
// row-major destination the lhs/rhs roles are swapped via Transpose.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA; // number of LhsScalar elements in blockA
Index m_sizeB; // number of RhsScalar elements in blockB
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// keep m_nc untouched: pass a copy so only kc/mc are adjusted
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Variant used by the parallel path: m_mc must stay the full row count
// (each thread packs its own slice), so only kc/nc are tuned.
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// Product evaluator for dense * dense matrix products selected for the GEMM
// path. Small products are re-routed to the lazy (coefficient-based) product,
// degenerate vector shapes to GEMV, and everything else to the blocked GEMM.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
// blas_traits strip transpose/conjugate/scalar-multiple wrappers so the raw
// operand data can be handed to the BLAS-like kernel.
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * a_lhs * a_rhs — the workhorse entry point.
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
// Empty product: nothing to accumulate.
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
// Fallback to GEMV if either the lhs or rhs is a runtime vector
if (dst.cols() == 1)
{
typename Dest::ColXpr dst_vec(dst.col(0));
return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
}
else if (dst.rows() == 1)
{
typename Dest::RowXpr dst_vec(dst.row(0));
return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
}
// Unwrap the operands and fold any scalar factors (e.g. from s*M) into alpha.
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
// Parallelize only when the destination can be large enough to benefit.
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
VerletClusterListsTest.h | /**
* @file VerletClusterListsTest.h
* @author nguyen
* @date 21.10.18
*/
#pragma once
#include <gtest/gtest.h>
#include "AutoPasTestBase.h"
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/verletClusterLists/traversals/VerletClustersColoringTraversal.h"
#include "autopas/particles/Particle.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopasTools/generators/RandomGenerator.h"
#include "mocks/MockFunctor.h"
#include "testingHelpers/commonTypedefs.h"
// Test fixture for VerletClusterLists tests; no state beyond the common base.
class VerletClusterListsTest : public AutoPasTestBase {};
#if defined(AUTOPAS_OPENMP)
// Pairwise functor that records, per color and per OpenMP thread, which
// particles each thread touched during a colored traversal. Used to verify
// that threads working on the same color never share particles.
class CollectParticlesPerThreadFunctor
: public autopas::Functor<autopas::Particle, autopas::FullParticleCell<autopas::Particle>> {
public:
// Color currently being traversed; threadprivate so each thread keeps its own
// copy (updated via nextColor() inside a parallel region).
static int _currentColor;
#pragma omp threadprivate(_currentColor)
// [color][thread] -> set of particles that thread touched for that color.
// 8 is the number of colors used by the coloring traversal under test.
std::array<std::vector<std::set<Particle *>>, 8> _particlesPerThreadPerColor;
public:
CollectParticlesPerThreadFunctor() : Functor(0) {}
// Size the per-color vectors to the maximum number of OpenMP threads.
void initTraversal() override {
for (int i = 0; i < 8; i++) {
_particlesPerThreadPerColor[i].resize(autopas::autopas_get_max_threads());
}
}
// Record both interaction partners under the executing thread's slot.
void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
auto threadNum = autopas::autopas_get_thread_num();
_particlesPerThreadPerColor[_currentColor][threadNum].insert(&i);
_particlesPerThreadPerColor[_currentColor][threadNum].insert(&j);
}
bool isRelevantForTuning() override { return false; }
bool allowsNewton3() override { return true; }
bool allowsNonNewton3() override { return true; }
bool isAppropriateClusterSize(unsigned int clusterSize, autopas::DataLayoutOption::Value dataLayout) const override {
return dataLayout == autopas::DataLayoutOption::aos; // this functor supports clusters only for aos!
}
static void nextColor(int newColor) { _currentColor = newColor; }
};
int CollectParticlesPerThreadFunctor::_currentColor = 0;
// Coloring traversal that invokes a user-supplied callback whenever the
// traversal switches to a new color, so tests can update
// CollectParticlesPerThreadFunctor::_currentColor in lock-step.
class ColoringTraversalWithColorChangeNotify
: public autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
autopas::DataLayoutOption::aos, true> {
public:
ColoringTraversalWithColorChangeNotify(CollectParticlesPerThreadFunctor *functor,
std::function<void(int)> whenColorChanges)
: autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
autopas::DataLayoutOption::aos, true>(functor) {
_whenColorChanges = std::move(whenColorChanges);
}
// Called by the base traversal each time the active color changes.
void notifyColorChange(unsigned long newColor) override { _whenColorChanges(newColor); }
private:
std::function<void(int)> _whenColorChanges;
};
#endif |
common.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iomanip>
#include <iterator>
#include <memory>
#include <sstream>
#include <type_traits>
#include <utility>
#include <vector>
#ifdef _MSC_VER
#include "intrin.h"
#endif
namespace LightGBM {
namespace Common {
// Locale-independent ASCII lower-casing; bytes outside 'A'..'Z' pass through.
inline static char tolower(char in) {
  return (in >= 'A' && in <= 'Z') ? static_cast<char>(in + ('a' - 'A')) : in;
}
// Strip leading and trailing whitespace (space, \f, \n, \r, \t, \v).
inline static std::string Trim(std::string str) {
  const char* kWhitespace = " \f\n\r\t\v";
  if (!str.empty()) {
    str.erase(str.find_last_not_of(kWhitespace) + 1);
    str.erase(0, str.find_first_not_of(kWhitespace));
  }
  return str;
}
// Strip leading and trailing single/double quote characters.
inline static std::string RemoveQuotationSymbol(std::string str) {
  const char* kQuotes = "'\"";
  if (!str.empty()) {
    str.erase(str.find_last_not_of(kQuotes) + 1);
    str.erase(0, str.find_first_not_of(kQuotes));
  }
  return str;
}
// True if `str` begins with `prefix` (empty prefix matches everything).
// Uses compare() instead of substr() to avoid allocating a temporary string,
// and takes the prefix by const reference instead of by value.
inline static bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.size() >= prefix.size() && str.compare(0, prefix.size(), prefix) == 0;
}
// Tokenize on `delimiter`; consecutive delimiters yield no empty tokens.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> tokens;
  const std::string s(c_str);
  size_t token_begin = 0;
  for (size_t cur = 0; cur < s.length(); ++cur) {
    if (s[cur] == delimiter) {
      if (token_begin < cur) {
        tokens.push_back(s.substr(token_begin, cur - token_begin));
      }
      token_begin = cur + 1;
    }
  }
  if (token_begin < s.length()) {
    tokens.push_back(s.substr(token_begin));
  }
  return tokens;
}
// Split into lines on any run of '\n'/'\r'; empty lines are dropped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> lines;
  const std::string s(c_str);
  size_t line_begin = 0;
  size_t cur = 0;
  while (cur < s.length()) {
    if (s[cur] == '\n' || s[cur] == '\r') {
      if (line_begin < cur) {
        lines.push_back(s.substr(line_begin, cur - line_begin));
      }
      // Consume the whole run of line-ending characters (handles \r\n too).
      // operator[] at size() is a valid read of '\0' since C++11.
      do {
        ++cur;
      } while (s[cur] == '\n' || s[cur] == '\r');
      line_begin = cur;
    } else {
      ++cur;
    }
  }
  if (line_begin < cur) {
    lines.push_back(s.substr(line_begin));
  }
  return lines;
}
// Tokenize on any character of `delimiters`; no empty tokens are produced.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> tokens;
  const std::string s(c_str);
  size_t token_begin = 0;
  for (size_t cur = 0; cur < s.length(); ++cur) {
    bool is_delim = false;
    for (const char* d = delimiters; *d != '\0'; ++d) {
      if (s[cur] == *d) {
        is_delim = true;
        break;
      }
    }
    if (is_delim) {
      if (token_begin < cur) {
        tokens.push_back(s.substr(token_begin, cur - token_begin));
      }
      token_begin = cur + 1;
    }
  }
  if (token_begin < s.length()) {
    tokens.push_back(s.substr(token_begin));
  }
  return tokens;
}
// Parse a decimal integer with optional sign into *out; skips surrounding
// spaces and returns a pointer to the first unconsumed character.
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  while (*p == ' ') ++p;
  int sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  T value = 0;
  while (*p >= '0' && *p <= '9') {
    value = value * 10 + (*p - '0');
    ++p;
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') ++p;
  return p;
}
// Integer power via exponentiation by squaring/cubing; negative exponents
// are handled through the reciprocal.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) return 1.0 / Pow(base, -power);
  if (power == 0) return 1;
  if (power % 2 == 0) return Pow(base * base, power / 2);
  if (power % 3 == 0) return Pow(base * base * base, power / 3);
  return base * Pow(base, power - 1);
}
// Locale-independent parse of a floating point number (or the tokens
// na/nan/null -> NAN, inf/infinity -> +/-1e308) into *out. Skips surrounding
// spaces and returns a pointer to the first unconsumed character.
// Aborts via Log::Fatal on any other token.
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double right = 0.0;
int nn = 0;
++p;
while (*p >= '0' && *p <= '9') {
right = (*p - '0') + right * 10.0;
++nn;
++p;
}
value += right / Pow(10.0, nn);
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
// Clamp to the largest finite double decimal exponent.
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
// Not numeric: collect the token up to a separator and match known words.
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
tmp_str == std::string("null")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
// Parse an int and accept only if the whole string was consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  return *Atoi(p, out) == '\0';
}
// Parse a double and accept only if the whole string was consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  return *Atof(p, out) == '\0';
}
// Number of decimal digits needed to print `n` (1..10). On MSVC/GCC the
// digit count is derived from the bit length (log10(2) ~= 1233/4096), with a
// table lookup to correct the off-by-one near powers of ten.
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
static const uint32_t powers_of_10[] = {
0,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000
};
#ifdef _MSC_VER
// `n | 1` keeps the bit-scan well-defined for n == 0.
unsigned long i = 0;
_BitScanReverse(&i, n | 1);
uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
return t - (n < powers_of_10[t]) + 1;
#else
// Portable fallback: simple threshold chain.
if (n < 10) return 1;
if (n < 100) return 2;
if (n < 1000) return 3;
if (n < 10000) return 4;
if (n < 100000) return 5;
if (n < 1000000) return 6;
if (n < 10000000) return 7;
if (n < 100000000) return 8;
if (n < 1000000000) return 9;
return 10;
#endif
}
// Fast decimal formatting of `value` into `buffer` (NUL-terminated).
// Writes two digits at a time from the back using a 00..99 lookup table;
// the caller must provide at least CountDecimalDigit32(value)+1 bytes.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
const char kDigitsLut[200] = {
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
'1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
'2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
'3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
'4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
'5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
'6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
'7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
'8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
'9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
};
// Start at the terminator position and emit digits right-to-left.
unsigned digit = CountDecimalDigit32(value);
buffer += digit;
*buffer = '\0';
while (value >= 100) {
const unsigned i = (value % 100) << 1;
value /= 100;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
// At most two digits remain.
if (value < 10) {
*--buffer = static_cast<char>(value) + '0';
} else {
const unsigned i = value << 1;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
}
// Signed decimal formatting into `buffer`. The magnitude is negated in
// unsigned arithmetic (~u + 1), which stays well-defined for INT32_MIN.
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    magnitude = ~magnitude + 1;
  }
  Uint32ToStr(magnitude, buffer);
}
// Format `value` with maximum round-trip precision ("%.17g").
// The buffer-length parameter is only named (and used) on MSVC, where the
// bounded sprintf_s is available; elsewhere plain sprintf is used, so the
// caller must supply a sufficiently large buffer (callers in this file use 32).
inline static void DoubleToStr(double value, char* buffer, size_t
#ifdef _MSC_VER
buffer_len
#endif
) {
#ifdef _MSC_VER
sprintf_s(buffer, buffer_len, "%.17g", value);
#else
sprintf(buffer, "%.17g", value);
#endif
}
// Advance past any run of spaces and tabs.
inline static const char* SkipSpaceAndTab(const char* p) {
  for (; *p == ' ' || *p == '\t'; ++p) {
  }
  return p;
}
// Advance past any run of newline, carriage-return and space characters.
inline static const char* SkipReturn(const char* p) {
  for (; *p == '\n' || *p == '\r' || *p == ' '; ++p) {
  }
  return p;
}
// Element-wise static_cast of a vector<T> into a vector<T2>.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> converted(arr.size());
  std::transform(arr.begin(), arr.end(), converted.begin(),
                 [](const T& v) { return static_cast<T2>(v); });
  return converted;
}
// Formatting trait used by ArrayToStringFast: dispatches on (is_float,
// is_unsigned) to the fastest suitable number-to-string routine.
// Primary template: signed integers via Int32ToStr.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
void operator()(T value, char* buffer, size_t) const {
Int32ToStr(value, buffer);
}
};
// Floating point: "%g" formatting (bounded sprintf_s on MSVC only).
template<typename T>
struct __TToStringHelperFast<T, true, false> {
void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
buf_len
#endif
) const {
#ifdef _MSC_VER
sprintf_s(buffer, buf_len, "%g", value);
#else
sprintf(buffer, "%g", value);
#endif
}
};
// Unsigned integers via Uint32ToStr.
template<typename T>
struct __TToStringHelperFast<T, false, true> {
void operator()(T value, char* buffer, size_t) const {
Uint32ToStr(value, buffer);
}
};
// Join the first min(n, arr.size()) elements with single spaces, using the
// fast per-type formatter ("%g" precision for floats). Returns "" when empty.
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
// 16 bytes is enough for 32-bit integers and "%g"-formatted doubles.
const size_t buf_len = 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
// Join the first min(n, arr.size()) doubles with single spaces using full
// round-trip precision ("%.17g" via DoubleToStr). Returns "" when empty.
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
// 32 bytes comfortably holds any "%.17g"-formatted double.
const size_t buf_len = 32;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
DoubleToStr(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
DoubleToStr(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
// Parsing trait used by StringToArray: integers go through the in-house Atoi,
// floating point through std::stod.
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
// Floating-point specialization.
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
// Split `str` on `delimiter` and parse every token as T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
// Split `str` on spaces and parse exactly `n` values of type T; aborts via
// CHECK if the token count differs from n.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK(strs.size() == static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
// In-place parsing trait used by StringToArrayFast: parses one value from `p`
// and returns the advanced pointer. Integers via Atoi, floats via Atof.
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
// Floating-point specialization: parse as double, then narrow to T.
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0f;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
// Parse exactly `n` whitespace-separated values from `str` without building
// intermediate token strings (single pass over the character buffer).
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
// Concatenate all elements separated by `delimiter`; "" for an empty vector.
// Stream precision is raised so doubles round-trip.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  std::stringstream joined;
  joined << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  for (size_t i = 0; i < strs.size(); ++i) {
    if (i > 0) {
      joined << delimiter;
    }
    joined << strs[i];
  }
  return joined.str();
}
// int8_t specialization: widen to int16_t before streaming so values print as
// numbers instead of raw characters.
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << static_cast<int16_t>(strs[0]);
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << static_cast<int16_t>(strs[i]);
}
return str_buf.str();
}
// Concatenate elements [start, end) separated by `delimiter`; "" when the
// range is empty. `end` is clamped to the vector size.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  // `end - start <= 0` on size_t wraps around when end < start and would
  // wrongly pass; also guard against an empty vector, where the unsigned
  // `strs.size() - 1` below would underflow and allow out-of-bounds access.
  if (strs.empty() || end <= start) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}
// Smallest power of two >= x (1 for x <= 1); 0 when x exceeds 2^62.
// The original loop could left-shift the accumulator past INT64_MAX, which is
// signed-overflow undefined behavior; we stop before the overflowing shift.
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 63; ++i) {
    if (t >= x) {
      return t;
    }
    if (t > (INT64_MAX >> 1)) {
      break;  // next shift would overflow
    }
    t <<= 1;
  }
  return 0;
}
/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values; an empty vector is a no-op.
*/
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // Guard: the original read rec[0] unconditionally, which is undefined
  // behavior on an empty vector.
  if (rec.empty()) {
    return;
  }
  // Subtract the maximum before exponentiating for numerical stability.
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= wsum;
  }
}
// Softmax of input[0..len) written to output[0..len); the maximum is
// subtracted before exponentiating for numerical stability.
inline static void Softmax(const double* input, double* output, int len) {
  double max_val = input[0];
  for (int i = 1; i < len; ++i) {
    max_val = std::max(input[i], max_val);
  }
  double sum = 0.0;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - max_val);
    sum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= sum;
  }
}
// View a vector of unique_ptr<T> as a vector of raw const pointers
// (ownership stays with the input vector).
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> raw_ptrs;
  raw_ptrs.reserve(input.size());
  for (const auto& owned : input) {
    raw_ptrs.push_back(owned.get());
  }
  return raw_ptrs;
}
// Stable-sort keys[start..] (and values[start..] in lock-step) by key,
// ascending by default, descending when is_reverse is set.
// Fix: the original write-back loop indexed the temporary pair array starting
// at `start` instead of 0, corrupting the output whenever start > 0.
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  arr.reserve(keys->size() - start);
  for (size_t i = start; i < keys->size(); ++i) {
    arr.emplace_back(keys->at(i), values->at(i));
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  // arr[0] corresponds to element `start` of the original vectors.
  for (size_t i = 0; i < arr.size(); ++i) {
    keys->at(start + i) = arr[i].first;
    values->at(start + i) = arr[i].second;
  }
}
// Collect the data() pointer of each inner vector (no ownership transfer;
// pointers are invalidated if the inner vectors reallocate).
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
  std::vector<T*> ptrs;
  ptrs.reserve(data->size());
  for (auto& inner : *data) {
    ptrs.push_back(inner.data());
  }
  return ptrs;
}
// Sizes of the inner vectors, each narrowed to int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> sizes;
  sizes.reserve(data.size());
  for (const auto& inner : data) {
    sizes.push_back(static_cast<int>(inner.size()));
  }
  return sizes;
}
// Map NaN to 0 and clamp to +/-1e300 so downstream arithmetic cannot
// produce infinities.
inline static double AvoidInf(double x) {
  if (std::isnan(x)) return 0.0;
  if (x >= 1e300) return 1e300;
  if (x <= -1e300) return -1e300;
  return x;
}
// float overload: map NaN to 0 and clamp to +/-1e38f.
inline static float AvoidInf(float x) {
  if (std::isnan(x)) return 0.0f;
  if (x >= 1e38) return 1e38f;
  if (x <= -1e38) return -1e38f;
  return x;
}
// Helper to obtain an iterator's value_type as a pointer tag for overload
// selection in ParallelSort; the returned null pointer is never dereferenced.
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
// Parallel merge sort: sort chunks of >= kMinInnerLen elements on separate
// OpenMP threads, then merge pairs of runs bottom-up, doubling the run length
// each round. Falls back to std::sort for small inputs or a single thread.
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
// Chunk size: ceil(len / num_threads), but never below kMinInnerLen.
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
// NOTE(review): assumes _RanIt is assignable from std::vector's iterator
// (i.e. callers pass vector iterators) — confirm before reusing elsewhere.
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
// Copy the left run out, then merge it with the right run in place.
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
// Convenience overload: deduces the value type via IteratorValType.
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// Elements are scanned in pairs: ordering each pair first means only the
// smaller needs comparing against ymin and the larger against ymax.
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
// The pairwise loop misses the final element when ny is odd.
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
// One-pass pairwise scan over w[0..nw): computes min, max and sum.
// Each pair is ordered internally before comparing against the running
// min/max. Any of mi/ma/su may be nullptr to skip that output.
// The seeding consumes one element (odd nw) or two (even nw) so the
// remaining count is always even; summation order matches the pairwise scan.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 cur_min;
  T1 cur_max;
  T1 total;
  int idx;
  if (nw & 1) { // odd
    cur_min = w[0];
    cur_max = w[0];
    total = w[0];
    idx = 2;
  } else { // even
    if (w[0] < w[1]) {
      cur_min = w[0];
      cur_max = w[1];
    } else {
      cur_min = w[1];
      cur_max = w[0];
    }
    total = w[0] + w[1];
    idx = 3;
  }
  for (; idx < nw; idx += 2) {
    const T1 a = w[idx - 1];
    const T1 b = w[idx];
    if (a < b) {
      cur_min = std::min(cur_min, a);
      cur_max = std::max(cur_max, b);
    } else {
      cur_min = std::min(cur_min, b);
      cur_max = std::max(cur_max, a);
    }
    total += a + b;
  }
  if (mi != nullptr) {
    *mi = cur_min;
  }
  if (ma != nullptr) {
    *ma = cur_max;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(total);
  }
}
// Zeroed bitset able to hold `n` bits: one 32-bit word per 32 bits, round up.
inline static std::vector<uint32_t> EmptyBitset(int n) {
  int nwords = n / 32;
  if (n % 32 != 0) {
    ++nwords;
  }
  return std::vector<uint32_t>(nwords);
}
// Set bit `val` in the bitset, growing it with zeroed words as needed.
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
  const int word = val / 32;
  const int bit = val % 32;
  if (static_cast<int>(vec->size()) < word + 1) {
    vec->resize(word + 1, 0);
  }
  // 1U: left-shifting a signed 1 by 31 is undefined behavior (CERT INT32-C).
  vec->at(word) |= (1U << bit);
}
// Build a bitset with bits vals[0..n) set; sized to the largest value seen.
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    const int word = vals[i] / 32;
    const int bit = vals[i] % 32;
    if (static_cast<int>(ret.size()) < word + 1) {
      ret.resize(word + 1, 0);
    }
    // 1U: left-shifting a signed 1 by 31 is undefined behavior (CERT INT32-C).
    ret[word] |= (1U << bit);
  }
  return ret;
}
// True if bit `pos` is set in an n-word bitset; positions past the end are
// reported as unset.
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  const int word = pos / 32;
  if (word >= n) {
    return false;
  }
  return (bits[word] >> (pos % 32)) & 1;
}
// True when b is no larger than the next representable double above a,
// i.e. b <= a up to one ULP of tolerance.
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  return b <= std::nextafter(a, INFINITY);
}
// Next representable double strictly above `a`.
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);
}
// Length of the current line: bytes before the first '\n', '\r' or NUL.
inline static size_t GetLine(const char* str) {
  const char* cur = str;
  while (*cur != '\0' && *cur != '\n' && *cur != '\r') {
    ++cur;
  }
  return static_cast<size_t>(cur - str);
}
// Consume at most one line ending: "\r", "\n", or "\r\n".
inline static const char* SkipNewLine(const char* str) {
  const char* p = str;
  if (*p == '\r') {
    ++p;
  }
  if (*p == '\n') {
    ++p;
  }
  return p;
}
// Sign of x: +1, -1, or 0.
template <typename T>
static int Sign(T x) {
  if (x > T(0)) return 1;
  if (x < T(0)) return -1;
  return 0;
}
// log(x) for positive x; -infinity otherwise (including x == 0 and NaN,
// since the x > 0 test is false for NaN).
template <typename T>
static T SafeLog(T x) {
  return x > 0 ? std::log(x) : -INFINITY;
}
// True when every byte of `s` is 7-bit ASCII.
inline bool CheckASCII(const std::string& s) {
  return std::all_of(s.begin(), s.end(), [](char c) {
    return static_cast<unsigned char>(c) <= 127;
  });
}
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
nvm_vblk.c | /*
* vblock - Virtual block functions
*
* Copyright (C) 2015-2017 Javier Gonzáles <javier@cnexlabs.com>
* Copyright (C) 2015-2017 Matias Bjørling <matias@cnexlabs.com>
* Copyright (C) 2015-2017 Simon A. F. Lund <slund@cnexlabs.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <liblightnvm.h>
#include <nvm_dev.h>
#include <nvm_vblk.h>
#include <nvm_omp.h>
#define NVM_VBLK_CMD_OPTS (NVM_CMD_SYNC | NVM_CMD_VECTOR | NVM_CMD_PRP)
/**
 * Switch the vblk to asynchronous command mode, lazily setting up the async
 * context and the pool of per-command nvm_ret slots.
 *
 * @returns 0 on success, -1 on error (errno set by the failing allocator).
 */
int nvm_vblk_set_async(struct nvm_vblk *vblk, uint32_t depth)
{
	vblk->flags &= ~NVM_CMD_SYNC;
	vblk->flags |= NVM_CMD_ASYNC;

	if (!vblk->async_ctx) {
		if (NULL == (vblk->async_ctx = nvm_async_init(vblk->dev, depth, 0))) {
			NVM_DEBUG("FAILED: nvm_async_init");
			return -1;
		}

		/* depth == 0 means "use the context's default depth" */
		if (depth == 0) {
			depth = nvm_async_get_depth(vblk->async_ctx);
		}

		/* The original code left these calloc results unchecked */
		vblk->rets = calloc(depth, sizeof(struct nvm_ret *));
		if (!vblk->rets) {
			NVM_DEBUG("FAILED: calloc rets");
			return -1;
		}
		for (uint32_t i = 0; i < depth; i++) {
			vblk->rets[i] = calloc(1, sizeof(struct nvm_ret));
			if (!vblk->rets[i]) {
				NVM_DEBUG("FAILED: calloc rets[i]");
				return -1;
			}
		}
	}

	return 0;
}
// Switch the vblk from vector-addressed to scalar (LBA-range) commands.
// Always succeeds.
int nvm_vblk_set_scalar(struct nvm_vblk *vblk)
{
vblk->flags &= ~NVM_CMD_VECTOR;
vblk->flags |= NVM_CMD_SCALAR;
return 0;
}
// Completion callback for async vblk I/O: counts failures via the state's
// shared error counter, then recycles the nvm_ret slot back onto the vblk's
// stack of free slots.
static void vblk_async_callback(struct nvm_ret *ret, void *opaque)
{
struct nvm_vblk_async_cb_state *state = opaque;
struct nvm_vblk *vblk = state->vblk;
if (ret->status) {
(*state->nerr)++;
}
// Clear and push the slot; retsp points one past the last free entry.
memset(ret, 0, sizeof(*ret));
vblk->rets[--(vblk->retsp)] = ret;
}
// Allocate a virtual block spanning the given physical block addresses
// (at most 128, the capacity of vblk->blks). Validates each address against
// the device and computes the total byte capacity from the device geometry.
// Returns NULL with errno set (EINVAL/ENOMEM/ENOSYS) on failure; the caller
// owns the result and releases it with nvm_vblk_free().
struct nvm_vblk* nvm_vblk_alloc(struct nvm_dev *dev, struct nvm_addr addrs[],
int naddrs)
{
struct nvm_vblk *vblk;
const struct nvm_geo *geo;
if (naddrs > 128) {
errno = EINVAL;
return NULL;
}
geo = nvm_dev_get_geo(dev);
if (!geo) {
errno = EINVAL;
return NULL;
}
vblk = calloc(1, sizeof(*vblk));
if (!vblk) {
errno = ENOMEM;
return NULL;
}
for (int i = 0; i < naddrs; ++i) {
if (nvm_addr_check(addrs[i], dev)) {
NVM_DEBUG("FAILED: nvm_addr_check");
errno = EINVAL;
free(vblk);
return NULL;
}
vblk->blks[i].ppa = addrs[i].ppa;
}
vblk->nblks = naddrs;
vblk->dev = dev;
vblk->pos_write = 0;
vblk->pos_read = 0;
vblk->flags = NVM_VBLK_CMD_OPTS;
// Capacity depends on the spec revision's geometry layout.
switch (nvm_dev_get_verid(dev)) {
case NVM_SPEC_VERID_12:
vblk->nbytes = vblk->nblks * geo->nplanes * geo->npages *
geo->nsectors * geo->sector_nbytes;
break;
case NVM_SPEC_VERID_20:
vblk->nbytes = vblk->nblks * geo->l.nsectr * geo->l.nbytes;
break;
default:
NVM_DEBUG("FAILED: unsupported verid");
errno = ENOSYS;
free(vblk);
return NULL;
}
return vblk;
}
/**
 * Allocate a virtual block spanning block/chunk index `blk` on every
 * channel/LUN (1.2) or group/PU (2.0) in the inclusive ranges
 * [ch_bgn, ch_end] x [lun_bgn, lun_end].
 *
 * Returns NULL with errno set on failure (errno propagated from
 * nvm_vblk_alloc, or EINVAL/ENOSYS from the checks below).
 */
struct nvm_vblk *nvm_vblk_alloc_line(struct nvm_dev *dev, int ch_bgn,
				     int ch_end, int lun_bgn, int lun_end,
				     int blk)
{
	const int verid = nvm_dev_get_verid(dev);
	const struct nvm_geo *geo = nvm_dev_get_geo(dev);
	struct nvm_vblk *vblk;

	vblk = nvm_vblk_alloc(dev, NULL, 0);
	if (!vblk)
		return NULL; // Propagate errno

	switch (verid) {
	case NVM_SPEC_VERID_12:
		for (int lun = lun_bgn; lun <= lun_end; ++lun) {
			for (int ch = ch_bgn; ch <= ch_end; ++ch) {
				vblk->blks[vblk->nblks].ppa = 0;
				vblk->blks[vblk->nblks].g.ch = ch;
				vblk->blks[vblk->nblks].g.lun = lun;
				vblk->blks[vblk->nblks].g.blk = blk;
				++(vblk->nblks);
			}
		}
		vblk->nbytes = vblk->nblks * geo->nplanes * geo->npages *
			geo->nsectors * geo->sector_nbytes;
		break;

	case NVM_SPEC_VERID_20:
		for (int punit = lun_bgn; punit <= lun_end; ++punit) {
			for (int pugrp = ch_bgn; pugrp <= ch_end; ++pugrp) {
				vblk->blks[vblk->nblks].ppa = 0;
				vblk->blks[vblk->nblks].l.pugrp = pugrp;
				vblk->blks[vblk->nblks].l.punit = punit;
				vblk->blks[vblk->nblks].l.chunk = blk;
				++(vblk->nblks);
			}
		}
		vblk->nbytes = vblk->nblks * geo->l.nsectr * geo->l.nbytes;
		break;

	default:
		NVM_DEBUG("FAILED: unsupported verid: %d", verid);
		/* vblk comes from calloc() in nvm_vblk_alloc(), so release it
		 * with free() like every other error path, not nvm_buf_free()
		 */
		free(vblk);
		errno = ENOSYS;
		return NULL;
	}

	for (int i = 0; i < vblk->nblks; ++i) {
		if (nvm_addr_check(vblk->blks[i], dev)) {
			NVM_DEBUG("FAILED: nvm_addr_check");
			free(vblk);
			errno = EINVAL;
			return NULL;
		}
	}

	return vblk;
}
/* Release a vblk handle allocated by nvm_vblk_alloc/nvm_vblk_alloc_line.
 * NOTE(review): only the struct itself is freed here; the 'rets' array and
 * 'async_ctx' created by nvm_vblk_set_async() are not released in this
 * function — verify they are torn down elsewhere or this leaks for
 * async-configured vblks. */
void nvm_vblk_free(struct nvm_vblk *vblk)
{
free(vblk);
}
/* Choose a per-command block count: the largest value in [1, cmd_nblks_max]
 * that divides nblks evenly, falling back to 1 when none does.
 * (If cmd_nblks_max <= 1 it is returned unchanged.) */
static inline int cmd_nblks(int nblks, int cmd_nblks_max)
{
	int cand;

	for (cand = cmd_nblks_max; cand > 1; --cand) {
		if (nblks % cand == 0)
			break;
	}

	return cand;
}
/* Erase all blocks of a 1.2-spec vblk.
 * Each physical block is addressed once per plane (BLK_NADDRS), and blocks
 * are batched CMD_NBLKS at a time per erase command. Resets both read and
 * write positions on success. Returns vblk->nbytes, or -1 with errno=EIO if
 * any erase command failed. */
static inline ssize_t vblk_erase_s12(struct nvm_vblk *vblk)
{
size_t nerr = 0;
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
// One address per plane for each block
const int BLK_NADDRS = geo->nplanes;
// Batch size chosen so the device's per-command address limit is respected
const int CMD_NBLKS = cmd_nblks(vblk->nblks,
nvm_dev_get_erase_naddrs_max(vblk->dev) / BLK_NADDRS);
const int pmode = nvm_dev_get_pmode(vblk->dev);
for (int off = 0; off < vblk->nblks; off += CMD_NBLKS) {
ssize_t err;
struct nvm_ret ret = { 0 };
const int nblks = NVM_MIN(CMD_NBLKS, vblk->nblks - off);
const int naddrs = nblks * BLK_NADDRS;
struct nvm_addr addrs[naddrs];
// Expand each block into per-plane addresses
for (int i = 0; i < naddrs; ++i) {
const int idx = off + (i / BLK_NADDRS);
addrs[i].ppa = vblk->blks[idx].ppa;
addrs[i].g.pl = i % geo->nplanes;
}
err = nvm_cmd_erase(vblk->dev, addrs, naddrs, NULL,
pmode | NVM_VBLK_CMD_OPTS, &ret);
if (err)
++nerr;
}
if (nerr) {
errno = EIO;
return -1;
}
// Erase invalidates content; rewind both cursors
vblk->pos_write = 0;
vblk->pos_read = 0;
return vblk->nbytes;
}
/* Erase (reset) all chunks of a 2.0-spec vblk, batching addresses per
 * command up to the device limit. Rewinds both cursors on success and
 * returns the vblk capacity; returns -1 with errno=EIO if any command
 * failed. */
static inline ssize_t vblk_erase_s20(struct nvm_vblk *vblk)
{
	const int batch = cmd_nblks(vblk->nblks,
				    nvm_dev_get_erase_naddrs_max(vblk->dev));
	size_t failures = 0;

	for (int base = 0; base < vblk->nblks; base += batch) {
		const int naddrs = NVM_MIN(batch, vblk->nblks - base);
		struct nvm_addr addrs[naddrs];
		struct nvm_ret ret = { 0 };

		for (int idx = 0; idx < naddrs; ++idx)
			addrs[idx].ppa = vblk->blks[base + idx].ppa;

		if (nvm_cmd_erase(vblk->dev, addrs, naddrs, NULL, 0x0, &ret))
			++failures;
	}

	if (failures) {
		errno = EIO;
		return -1;
	}

	vblk->pos_write = 0;
	vblk->pos_read = 0;

	return vblk->nbytes;
}
/* Erase the given vblk, dispatching on the device spec-version.
 * Returns the vblk capacity on success; -1 with errno set on failure or
 * when the spec-version is unsupported (ENOSYS). */
ssize_t nvm_vblk_erase(struct nvm_vblk *vblk)
{
	const int verid = nvm_dev_get_verid(nvm_vblk_get_dev(vblk));

	if (verid == NVM_SPEC_VERID_12)
		return vblk_erase_s12(vblk);
	if (verid == NVM_SPEC_VERID_20)
		return vblk_erase_s20(vblk);

	NVM_DEBUG("FAILED: unsupported verid: %d", verid);
	errno = ENOSYS;
	return -1;
}
/* Pick the per-command spage count: the greatest value not exceeding
 * cmd_nspages_max that evenly divides nblks, or 1 when no larger divisor
 * exists. (Values <= 1 pass through unchanged.) */
static inline int _cmd_nspages(int nblks, int cmd_nspages_max)
{
	int n = cmd_nspages_max;

	for (; n > 1 && nblks % n != 0; --n)
		;

	return n;
}
/* Reap async completions greedily: busy-wait until at least one event
 * completes, then drain all further immediately-available events.
 * Returns the total number of events reaped, or -1 on poke failure
 * (errno propagated from nvm_async_poke). */
static inline int _vblk_async_greedy_reap(struct nvm_vblk *vblk)
{
int r, nevents = 0;
// spin until something can be reaped
do {
if (-1 == (nevents = nvm_async_poke(vblk->dev, vblk->async_ctx, 0)))
return -1;
} while (nevents == 0);
// reap until empty
do {
if (-1 == (r = nvm_async_poke(vblk->dev, vblk->async_ctx, 0)))
return -1;
nevents += r;
} while (r != 0);
return nevents;
}
/* Core asynchronous striped I/O for 2.0-spec vblks.
 * Data is striped across the vblk's chunks in units of ws_opt sectors; each
 * stripe becomes one async read or write command. Per-command nvm_ret
 * structures are popped from vblk->rets (the free-list refilled by
 * vblk_async_callback) and queue pressure is relieved by reaping before the
 * pool runs dry.
 *
 * @param vblk        Target vblk (must be in async mode)
 * @param vsectr_bgn  First virtual sector of the transfer
 * @param count       Byte count (caller guarantees ws_opt alignment)
 * @param buf         Data buffer, ignored per-stripe when pad_buf is set
 * @param meta_buf    Optional OOB/meta buffer (writes only)
 * @param pad_buf     If non-NULL, used as the source for every stripe (pad)
 * @param write       Non-zero => write, zero => read
 * @return number of failed commands (via callback counter), or -1 on
 *         submission/reap error with errno set
 */
static inline int vblk_io_async(struct nvm_vblk *vblk, const size_t vsectr_bgn,
const size_t count, void *buf, void *meta_buf, char *pad_buf,
int write)
{
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
const size_t sectr_nbytes = geo->l.nbytes;
const size_t nsectrs = count / sectr_nbytes;
const size_t stripe_nsectrs = nvm_dev_get_ws_opt(vblk->dev);
const size_t nstripes = nsectrs / stripe_nsectrs;
int err;
// Shared failure counter, incremented from the completion callback
uint64_t nerr = 0;
struct nvm_vblk_async_cb_state state = {
.nerr = &nerr,
.vblk = vblk,
};
// Chunk at which the first stripe lands (round-robin striping)
size_t cnk_bgn = (vsectr_bgn / stripe_nsectrs) % vblk->nblks;
NVM_DEBUG("cnk_bgn: %ld", cnk_bgn);
for (size_t stripe = 0; stripe < nstripes; stripe++) {
// Round-robin chunk selection; cnk_off advances one stripe per full round
size_t cnk_idx = (cnk_bgn + stripe) % vblk->nblks;
size_t cnk_off = (stripe / vblk->nblks) * stripe_nsectrs;
char *bufp = pad_buf ? pad_buf :
(char *)buf + (sectr_nbytes * stripe_nsectrs * stripe);
struct nvm_addr addrs[stripe_nsectrs];
for (size_t i = 0; i < stripe_nsectrs; i++) {
addrs[i].val = vblk->blks[cnk_idx].val;
addrs[i].l.sectr = cnk_off + i;
}
// this basically makes sure we never hit an EAGAIN below in
// the nvm_cmd_read/write call.
if (vblk->retsp == (nvm_async_get_depth(vblk->async_ctx) - 1)) {
if (_vblk_async_greedy_reap(vblk) < 0) {
NVM_DEBUG("FAILED: _vblk_async_greedy_reap: %d", errno);
return -1;
}
}
// Pop a ret from the free-list; the callback pushes it back on completion
struct nvm_ret *ret = vblk->rets[vblk->retsp++];
if (!ret) {
NVM_DEBUG("should not happen; retsp = %d", vblk->retsp);
errno = ENOMEM;
return -1;
}
ret->async.ctx = vblk->async_ctx;
ret->async.cb = vblk_async_callback;
ret->async.cb_arg = &state;
// Submit; on EAGAIN reap completions and retry, any other error aborts
while(1) {
err = write ?
nvm_cmd_write(vblk->dev, addrs, stripe_nsectrs,
bufp, meta_buf, vblk->flags,
ret) :
nvm_cmd_read(vblk->dev, addrs, stripe_nsectrs,
bufp, meta_buf, vblk->flags,
ret);
if (err < 0) {
if (errno == EAGAIN) {
if (_vblk_async_greedy_reap(vblk) < 0) {
NVM_DEBUG("FAILED: _vblk_async_greedy_reap: %d", errno);
return -1;
}
continue;
}
// propagate errno
return -1;
}
break;
}
// Drain outstanding commands once per full round over all chunks
if (((stripe + 1) % vblk->nblks) == 0) {
if (nvm_async_wait(vblk->dev, vblk->async_ctx) < 0) {
NVM_DEBUG("FAILED: nvm_async_wait");
return -1;
}
}
}
// Final barrier: wait for everything still in flight
err = nvm_async_wait(vblk->dev, vblk->async_ctx);
if (err < 0) {
return -1;
}
return nerr;
}
/* Asynchronous positional write for 2.0-spec vblks.
 * Validates ws_opt alignment of count and offset, sets up an optional
 * padding buffer (when buf is NULL) and an optional OOB/meta buffer, then
 * delegates the striped transfer to vblk_io_async().
 * Returns count on success; -1 with errno set on failure. */
static inline ssize_t vblk_async_pwrite_s20(struct nvm_vblk *vblk,
const void *buf,
size_t count, size_t offset)
{
size_t nerr = 0;
const uint32_t WS_OPT = nvm_dev_get_ws_opt(vblk->dev);
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
const size_t sectr_nbytes = geo->l.nbytes;
const size_t nsectr = count / sectr_nbytes;
const size_t vsectr_bgn = offset / sectr_nbytes;
const size_t cmd_nsectr = WS_OPT;
const size_t meta_tbytes = cmd_nsectr * geo->l.nbytes_oob;
char *meta_buf = NULL;
// NOTE(review): pad_nbytes scales with the *total* sector count times the
// per-command sector count; the sync twin uses the same formula but it
// looks over-sized for a single-stripe pad source — confirm intent
const size_t pad_nbytes = cmd_nsectr * nsectr * geo->l.nbytes;
char *pad_buf = NULL;
const int meta_mode = nvm_dev_get_meta_mode(vblk->dev);
if (nsectr % WS_OPT) {
NVM_DEBUG("FAILED: unaligned nsectr: %zu", nsectr);
errno = EINVAL;
return -1;
}
if (vsectr_bgn % WS_OPT) {
NVM_DEBUG("FAILED: unaligned vsectr_bgn: %zu", vsectr_bgn);
errno = EINVAL;
return -1;
}
if (!buf) { // Allocate and use a padding buffer
pad_buf = nvm_buf_alloc(vblk->dev, pad_nbytes, NULL);
if (!pad_buf) {
NVM_DEBUG("FAILED: nvm_buf_alloc(pad)");
errno = ENOMEM;
return -1;
}
nvm_buf_fill(pad_buf, pad_nbytes);
}
if (meta_mode != NVM_META_MODE_NONE) { // Meta buffer
meta_buf = nvm_buf_alloc(vblk->dev, meta_tbytes, NULL);
if (!meta_buf) {
nvm_buf_free(vblk->dev, pad_buf);
NVM_DEBUG("FAILED: nvm_buf_alloc(meta)");
errno = ENOMEM;
return -1;
}
switch(meta_mode) { // Fill it
case NVM_META_MODE_ALPHA:
nvm_buf_fill(meta_buf, meta_tbytes);
break;
case NVM_META_MODE_CONST:
// NOTE(review): every byte gets the same value (65 + meta_tbytes % 20);
// if a per-byte pattern was intended this should use i % 20 — confirm
for (size_t i = 0; i < meta_tbytes; ++i)
meta_buf[i] = 65 + (meta_tbytes % 20);
break;
case NVM_META_MODE_NONE:
break;
}
}
nerr = vblk_io_async(vblk, vsectr_bgn, count, (void *) buf, meta_buf,
pad_buf, 1 /* write */);
nvm_buf_free(vblk->dev, pad_buf);
nvm_buf_free(vblk->dev, meta_buf);
if (nerr) {
NVM_DEBUG("FAILED: nvm_cmd_write, nerr(%zu)", nerr);
errno = EIO;
return -1;
}
return count;
}
/* Asynchronous positional read for 2.0-spec vblks.
 * Both count and offset must be aligned to the device's optimal write-size
 * in sectors; the actual striped transfer is done by vblk_io_async().
 * Returns count on success, -1 with errno set otherwise. */
static inline ssize_t vblk_async_pread_s20(struct nvm_vblk *vblk, void *buf,
					   size_t count, size_t offset)
{
	const uint32_t ws_opt = nvm_dev_get_ws_opt(vblk->dev);
	const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
	const size_t nbytes_sectr = geo->l.nbytes;
	const size_t sectr_total = count / nbytes_sectr;
	const size_t sectr_first = offset / nbytes_sectr;
	size_t failed;

	if (sectr_total % ws_opt) {
		NVM_DEBUG("FAILED: unaligned nsectr: %zu", sectr_total);
		errno = EINVAL;
		return -1;
	}
	if (sectr_first % ws_opt) {
		NVM_DEBUG("FAILED: unaligned vsectr_bgn: %zu", sectr_first);
		errno = EINVAL;
		return -1;
	}

	failed = vblk_io_async(vblk, sectr_first, count, buf, NULL, NULL,
			       0 /* write */);
	if (failed) {
		NVM_DEBUG("FAILED: nvm_cmd_read, nerr(%zu)", failed);
		errno = EIO;
		return -1;
	}

	return count;
}
/* Synchronous positional read for 2.0-spec vblks, parallelized with OpenMP.
 * Sector addresses are computed from the round-robin striping layout
 * (write-units of WS_OPT sectors rotate across chunks). In scalar command
 * mode only the first address of each command is filled (the loop breaks
 * after idx 0); in vector mode up to NVM_NADDR_MAX addresses go in one
 * command. Returns count on success, -1 with errno set on failure. */
static inline ssize_t vblk_sync_pread_s20(struct nvm_vblk *vblk, void *buf,
size_t count, size_t offset)
{
size_t nerr = 0;
const uint32_t WS_OPT = nvm_dev_get_ws_opt(vblk->dev);
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
const size_t nchunks = vblk->nblks;
const size_t sectr_nbytes = geo->l.nbytes;
const size_t nsectr = count / sectr_nbytes;
const size_t sectr_bgn = offset / sectr_nbytes;
const size_t sectr_end = sectr_bgn + (count / sectr_nbytes) - 1;
// Vector mode batches up to NVM_NADDR_MAX sectors per command
const size_t cmd_nsectr = vblk->flags & NVM_CMD_VECTOR ? NVM_NADDR_MAX : WS_OPT;
// NOTE(review): NTHREADS is 0 when nsectr < WS_OPT (e.g. count == 0);
// the if(NTHREADS>1) clause keeps execution serial then — confirm that
// num_threads(0) is never evaluated by the runtime in that case
const int NTHREADS = NVM_MIN(nchunks, nsectr / WS_OPT);
if (nsectr % WS_OPT) {
NVM_DEBUG("FAILED: unaligned nsectr: %zu", nsectr);
errno = EINVAL;
return -1;
}
if (sectr_bgn % WS_OPT) {
NVM_DEBUG("FAILED: unaligned sectr_bgn: %zu", sectr_bgn);
errno = EINVAL;
return -1;
}
// Snapshot flags so each thread reads a loop-invariant local
const int VBLK_FLAGS = vblk->flags;
#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if(NTHREADS>1)
for (size_t sectr_ofz = sectr_bgn; sectr_ofz <= sectr_end; sectr_ofz += cmd_nsectr) {
struct nvm_addr addrs[cmd_nsectr];
char *buf_off = (char*)buf + (sectr_ofz - sectr_bgn) * sectr_nbytes;
for (size_t idx = 0; idx < cmd_nsectr; ++idx) {
// Map virtual sector -> (chunk, in-chunk sector) via round-robin striping
const size_t sectr = sectr_ofz + idx;
const size_t wunit = sectr / WS_OPT;
const size_t rnd = wunit / nchunks;
const size_t chunk = wunit % nchunks;
const size_t chunk_sectr = sectr % WS_OPT + rnd * WS_OPT;
addrs[idx].val = vblk->blks[chunk].val;
addrs[idx].l.sectr = chunk_sectr;
// Scalar commands carry a single start address
if (VBLK_FLAGS & NVM_CMD_SCALAR) break;
}
const ssize_t err = nvm_cmd_read(vblk->dev, addrs, cmd_nsectr,
buf_off, NULL,
VBLK_FLAGS, NULL);
if (err)
++nerr;
}
if (nerr) {
NVM_DEBUG("FAILED: nvm_cmd_read, nerr(%zu)", nerr);
errno = EIO;
return -1;
}
return count;
}
/* Synchronous positional write for 2.0-spec vblks, parallelized with OpenMP.
 * Writes WS_OPT sectors per command, striping write-units round-robin across
 * the vblk's chunks. A padding buffer is substituted when buf is NULL, and
 * an OOB/meta buffer is attached when the device's meta mode requires it.
 * The empty '#pragma omp ordered' block enforces submission ordering across
 * threads. Returns count on success; -1 with errno set on failure. */
static inline ssize_t vblk_sync_pwrite_s20(struct nvm_vblk *vblk,
const void *buf, size_t count,
size_t offset)
{
size_t nerr = 0;
const uint32_t WS_OPT = nvm_dev_get_ws_opt(vblk->dev);
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
const size_t nchunks = vblk->nblks;
const size_t sectr_nbytes = geo->l.nbytes;
const size_t nsectr = count / sectr_nbytes;
const size_t sectr_bgn = offset / sectr_nbytes;
const size_t sectr_end = sectr_bgn + (count / sectr_nbytes) - 1;
const size_t cmd_nsectr = WS_OPT;
const size_t meta_tbytes = cmd_nsectr * geo->l.nbytes_oob;
char *meta_buf = NULL;
// NOTE(review): pad_nbytes scales with total nsectr although the pad
// buffer is reused unmodified for every command — looks over-sized;
// confirm intent
const size_t pad_nbytes = cmd_nsectr * nsectr * geo->l.nbytes;
char *pad_buf = NULL;
const int NTHREADS = NVM_MIN(nchunks, nsectr / WS_OPT);
const int meta_mode = nvm_dev_get_meta_mode(vblk->dev);
if (nsectr % WS_OPT) {
NVM_DEBUG("FAILED: unaligned nsectr: %zu", nsectr);
errno = EINVAL;
return -1;
}
if (sectr_bgn % WS_OPT) {
NVM_DEBUG("FAILED: unaligned sectr_bgn: %zu", sectr_bgn);
errno = EINVAL;
return -1;
}
if (!buf) { // Allocate and use a padding buffer
pad_buf = nvm_buf_alloc(vblk->dev, pad_nbytes, NULL);
if (!pad_buf) {
NVM_DEBUG("FAILED: nvm_buf_alloc(pad)");
errno = ENOMEM;
return -1;
}
nvm_buf_fill(pad_buf, pad_nbytes);
}
if (meta_mode != NVM_META_MODE_NONE) { // Meta buffer
meta_buf = nvm_buf_alloc(vblk->dev, meta_tbytes, NULL);
if (!meta_buf) {
nvm_buf_free(vblk->dev, pad_buf);
NVM_DEBUG("FAILED: nvm_buf_alloc(meta)");
errno = ENOMEM;
return -1;
}
switch(meta_mode) { // Fill it
case NVM_META_MODE_ALPHA:
nvm_buf_fill(meta_buf, meta_tbytes);
break;
case NVM_META_MODE_CONST:
// NOTE(review): same constant for every byte; i % 20 may have been
// intended — confirm against the meta-mode specification
for (size_t i = 0; i < meta_tbytes; ++i)
meta_buf[i] = 65 + (meta_tbytes % 20);
break;
case NVM_META_MODE_NONE:
break;
}
}
// Snapshot flags so each thread reads a loop-invariant local
const int VBLK_FLAGS = vblk->flags;
#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if(NTHREADS>1)
for (size_t sectr_ofz = sectr_bgn; sectr_ofz <= sectr_end; sectr_ofz += cmd_nsectr) {
struct nvm_ret ret = { 0 };
struct nvm_addr addrs[cmd_nsectr];
char *buf_off;
if (pad_buf)
buf_off = pad_buf;
else
buf_off = (char*)buf + (sectr_ofz - sectr_bgn) * sectr_nbytes;
for (size_t idx = 0; idx < cmd_nsectr; ++idx) {
// Map virtual sector -> (chunk, in-chunk sector) via round-robin striping
const size_t sectr = sectr_ofz + idx;
const size_t wunit = sectr / WS_OPT;
const size_t rnd = wunit / nchunks;
const size_t chunk = wunit % nchunks;
const size_t chunk_sectr = sectr % WS_OPT + rnd * WS_OPT;
addrs[idx].ppa = vblk->blks[chunk].ppa;
addrs[idx].l.sectr = chunk_sectr;
}
const ssize_t err = nvm_cmd_write(vblk->dev, addrs, cmd_nsectr,
buf_off, meta_buf,
VBLK_FLAGS, &ret);
if (err)
++nerr;
// Empty ordered region: serializes iteration completion order
#pragma omp ordered
{}
}
nvm_buf_free(vblk->dev, pad_buf);
nvm_buf_free(vblk->dev, meta_buf);
if (nerr) {
NVM_DEBUG("FAILED: nvm_cmd_write, nerr(%zu)", nerr);
errno = EIO;
return -1;
}
return count;
}
/* Synchronous positional write for 1.2-spec vblks, parallelized with OpenMP.
 * Writes full "spages" (all planes x sectors of one page) and batches
 * CMD_NSPAGES of them per command, striping spages round-robin across the
 * vblk's blocks. A padding buffer is substituted when buf is NULL and an
 * OOB/meta buffer is attached when the device's meta mode requires it.
 * Returns count on success; -1 with errno set on failure. */
static inline ssize_t vblk_pwrite_s12(struct nvm_vblk *vblk, const void *buf,
				      size_t count, size_t offset)
{
	size_t nerr = 0;
	const int PMODE = nvm_dev_get_pmode(vblk->dev);
	const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
	const int SPAGE_NADDRS = geo->nplanes * geo->nsectors;
	const int CMD_NSPAGES = _cmd_nspages(vblk->nblks,
		nvm_dev_get_write_naddrs_max(vblk->dev) / SPAGE_NADDRS);
	const int ALIGN = SPAGE_NADDRS * geo->sector_nbytes;
	const int NTHREADS = vblk->nblks < CMD_NSPAGES ? 1 : vblk->nblks / CMD_NSPAGES;

	const size_t bgn = offset / ALIGN;
	const size_t end = bgn + (count / ALIGN);

	char *padding_buf = NULL;
	const size_t meta_tbytes = CMD_NSPAGES * SPAGE_NADDRS * geo->meta_nbytes;
	char *meta = NULL;
	const int meta_mode = nvm_dev_get_meta_mode(vblk->dev);

	if (offset + count > vblk->nbytes) {		// Check bounds
		errno = EINVAL;
		return -1;
	}

	if ((count % ALIGN) || (offset % ALIGN)) {	// Check align
		errno = EINVAL;
		return -1;
	}

	if (!buf) {	// Allocate and use a padding buffer
		const size_t nbytes = CMD_NSPAGES * SPAGE_NADDRS * geo->sector_nbytes;

		padding_buf = nvm_buf_alloc(vblk->dev, nbytes, NULL);
		if (!padding_buf) {
			NVM_DEBUG("FAILED: nvm_buf_alloc(padding)");
			errno = ENOMEM;
			return -1;
		}
		nvm_buf_fill(padding_buf, nbytes);
	}

	if (meta_mode != NVM_META_MODE_NONE) {	// Meta buffer
		meta = nvm_buf_alloc(vblk->dev, meta_tbytes, NULL);
		if (!meta) {
			// BUGFIX: padding_buf leaked here; the s20 twin frees
			// its pad buffer on this path (nvm_buf_free is a no-op
			// for NULL-style cleanup when no padding was allocated)
			nvm_buf_free(vblk->dev, padding_buf);
			NVM_DEBUG("FAILED: nvm_buf_alloc(meta)");
			errno = ENOMEM;
			return -1;
		}
		switch(meta_mode) {	// Fill it
		case NVM_META_MODE_ALPHA:
			nvm_buf_fill(meta, meta_tbytes);
			break;
		case NVM_META_MODE_CONST:
			for (size_t i = 0; i < meta_tbytes; ++i)
				meta[i] = 65 + (meta_tbytes % 20);
			break;
		case NVM_META_MODE_NONE:
			break;
		}
	}

#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if(NTHREADS>1)
	for (size_t off = bgn; off < end; off += CMD_NSPAGES) {
		struct nvm_ret ret = { 0 };
		const int nspages = NVM_MIN(CMD_NSPAGES, (int)(end - off));
		const int naddrs = nspages * SPAGE_NADDRS;
		struct nvm_addr addrs[naddrs];
		const char *buf_off;

		if (padding_buf)
			buf_off = padding_buf;
		else
			buf_off = (const char*)buf + (off - bgn) * geo->sector_nbytes * SPAGE_NADDRS;

		// Map each flat address to (block, page, plane, sector)
		for (int i = 0; i < naddrs; ++i) {
			const int spg = off + (i / SPAGE_NADDRS);
			const int idx = spg % vblk->nblks;
			const int pg = (spg / vblk->nblks) % geo->npages;

			addrs[i].ppa = vblk->blks[idx].ppa;
			addrs[i].g.pg = pg;
			addrs[i].g.pl = (i / geo->nsectors) % geo->nplanes;
			addrs[i].g.sec = i % geo->nsectors;
		}

		const ssize_t err = nvm_cmd_write(vblk->dev, addrs, naddrs,
						  buf_off, meta, PMODE, &ret);
		if (err)
			++nerr;

#pragma omp ordered
		{}
	}

	nvm_buf_free(vblk->dev, padding_buf);
	nvm_buf_free(vblk->dev, meta);

	if (nerr) {
		errno = EIO;
		return -1;
	}

	return count;
}
/* Positional write: route to the spec-version specific implementation,
 * choosing the async path for 2.0 devices when the vblk is in async mode.
 * Returns bytes written, or -1 with errno set (ENOSYS for unknown verid). */
ssize_t nvm_vblk_pwrite(struct nvm_vblk *vblk, const void *buf, size_t count,
			size_t offset)
{
	const int verid = nvm_dev_get_verid(nvm_vblk_get_dev(vblk));

	switch (verid) {
	case NVM_SPEC_VERID_12:
		return vblk_pwrite_s12(vblk, buf, count, offset);

	case NVM_SPEC_VERID_20:
		if (vblk->flags & NVM_CMD_ASYNC)
			return vblk_async_pwrite_s20(vblk, buf, count, offset);
		return vblk_sync_pwrite_s20(vblk, buf, count, offset);

	default:
		break;
	}

	NVM_DEBUG("FAILED: unsupported verid: %d", verid);
	errno = ENOSYS;
	return -1;
}
/* Append-style write: writes at the current write-cursor and advances the
 * cursor by the number of bytes actually written. Errors propagate with
 * errno intact and leave the cursor untouched. */
ssize_t nvm_vblk_write(struct nvm_vblk *vblk, const void *buf, size_t count)
{
	const ssize_t written = nvm_vblk_pwrite(vblk, buf, count,
						vblk->pos_write);

	if (written >= 0)
		vblk->pos_write += written;

	return written;
}
/* Pad the remainder of the vblk: a NULL buffer makes the write path fill
 * the range with generated padding data. */
ssize_t nvm_vblk_pad(struct nvm_vblk *vblk)
{
	const size_t remaining = vblk->nbytes - vblk->pos_write;

	return nvm_vblk_write(vblk, NULL, remaining);
}
/* Synchronous positional read for 1.2-spec vblks, parallelized with OpenMP.
 * Mirrors vblk_pwrite_s12: reads full spages (all planes x sectors of one
 * page), CMD_NSPAGES per command, striped round-robin across the vblk's
 * blocks. Returns count on success; -1 with errno set on failure. */
static inline ssize_t vblk_pread_s12(struct nvm_vblk *vblk, void *buf,
size_t count, size_t offset)
{
size_t nerr = 0;
const int PMODE = nvm_dev_get_pmode(vblk->dev);
const struct nvm_geo *geo = nvm_dev_get_geo(vblk->dev);
// One spage = every (plane, sector) pair of a single page
const int SPAGE_NADDRS = geo->nplanes * geo->nsectors;
const int CMD_NSPAGES = _cmd_nspages(vblk->nblks,
nvm_dev_get_read_naddrs_max(vblk->dev) / SPAGE_NADDRS);
const int ALIGN = SPAGE_NADDRS * geo->sector_nbytes;
const int NTHREADS = vblk->nblks < CMD_NSPAGES ? 1 : vblk->nblks / CMD_NSPAGES;
const size_t bgn = offset / ALIGN;
const size_t end = bgn + (count / ALIGN);
if (offset + count > vblk->nbytes) { // Check bounds
errno = EINVAL;
return -1;
}
if ((count % ALIGN) || (offset % ALIGN)) { // Check align
errno = EINVAL;
return -1;
}
#pragma omp parallel for num_threads(NTHREADS) schedule(static,1) reduction(+:nerr) ordered if(NTHREADS>1)
for (size_t off = bgn; off < end; off += CMD_NSPAGES) {
struct nvm_ret ret = { 0 };
const int nspages = NVM_MIN(CMD_NSPAGES, (int)(end - off));
const int naddrs = nspages * SPAGE_NADDRS;
struct nvm_addr addrs[naddrs];
char *buf_off;
buf_off = (char*)buf + (off - bgn) * geo->sector_nbytes * SPAGE_NADDRS;
// Map each flat address to (block, page, plane, sector)
for (int i = 0; i < naddrs; ++i) {
const int spg = off + (i / SPAGE_NADDRS);
const int idx = spg % vblk->nblks;
const int pg = (spg / vblk->nblks) % geo->npages;
addrs[i].ppa = vblk->blks[idx].ppa;
addrs[i].g.pg = pg;
addrs[i].g.pl = (i / geo->nsectors) % geo->nplanes;
addrs[i].g.sec = i % geo->nsectors;
}
const ssize_t err = nvm_cmd_read(vblk->dev, addrs, naddrs,
buf_off, NULL, PMODE, &ret);
if (err)
++nerr;
// Empty ordered region: serializes iteration completion order
#pragma omp ordered
{}
}
if (nerr) {
errno = EIO;
return -1;
}
return count;
}
/* Positional read: route to the spec-version specific implementation,
 * choosing the async path for 2.0 devices when the vblk is in async mode.
 * Returns bytes read, or -1 with errno set (ENOSYS for unknown verid). */
ssize_t nvm_vblk_pread(struct nvm_vblk *vblk, void *buf, size_t count,
		       size_t offset)
{
	const int verid = nvm_dev_get_verid(nvm_vblk_get_dev(vblk));

	switch (verid) {
	case NVM_SPEC_VERID_12:
		return vblk_pread_s12(vblk, buf, count, offset);

	case NVM_SPEC_VERID_20:
		if (vblk->flags & NVM_CMD_ASYNC)
			return vblk_async_pread_s20(vblk, buf, count, offset);
		return vblk_sync_pread_s20(vblk, buf, count, offset);

	default:
		break;
	}

	NVM_DEBUG("FAILED: unsupported verid: %d", verid);
	errno = ENOSYS;
	return -1;
}
/* Sequential read: reads at the current read-cursor and advances the cursor
 * by the number of bytes actually read. Errors propagate with errno intact
 * and leave the cursor untouched. */
ssize_t nvm_vblk_read(struct nvm_vblk *vblk, void *buf, size_t count)
{
	const ssize_t nread = nvm_vblk_pread(vblk, buf, count,
					     vblk->pos_read);

	if (nread >= 0)
		vblk->pos_read += nread;

	return nread;
}
/* Copy the entire contents of one 2.0-spec vblk to another of identical
 * shape. Source and destination addresses are generated pairwise from the
 * same round-robin striping layout (write-units of WS_MIN sectors across
 * chunks), batched up to cmd_nsectr_max per nvm_cmd_copy.
 * Returns the byte count on success; -1 with errno set on failure. */
static inline ssize_t vblk_copy_s20(struct nvm_vblk *src, struct nvm_vblk *dst)
{
	size_t nerr = 0;
	const size_t offset = 0;
	const size_t count = src->nbytes;
	const uint32_t WS_MIN = nvm_dev_get_ws_min(src->dev);
	const struct nvm_geo *geo = nvm_dev_get_geo(src->dev);
	const size_t nchunks = src->nblks;
	const size_t sectr_nbytes = geo->l.nbytes;
	const size_t nsectr = count / sectr_nbytes;
	const size_t sectr_bgn = offset / sectr_nbytes;
	const size_t sectr_end = sectr_bgn + (count / sectr_nbytes) - 1;
	// Largest WS_MIN-aligned batch that fits in one command
	const size_t cmd_nsectr_max = (NVM_NADDR_MAX / WS_MIN) * WS_MIN;

	if (nsectr % WS_MIN) {
		NVM_DEBUG("FAILED: unaligned nsectr: %zu", nsectr);
		errno = EINVAL;
		return -1;
	}
	if (sectr_bgn % WS_MIN) {
		NVM_DEBUG("FAILED: unaligned sectr_bgn: %zu", sectr_bgn);
		errno = EINVAL;
		return -1;
	}

	for (size_t sectr_ofz = sectr_bgn; sectr_ofz <= sectr_end; sectr_ofz += cmd_nsectr_max) {
		struct nvm_ret ret = { 0 };
		const size_t cmd_nsectr = NVM_MIN(sectr_end - sectr_ofz + 1, cmd_nsectr_max);
		struct nvm_addr addrs_src[cmd_nsectr];
		struct nvm_addr addrs_dst[cmd_nsectr];

		// Same striping layout in both vblks -> pairwise addresses
		for (size_t idx = 0; idx < cmd_nsectr; ++idx) {
			const size_t sectr = sectr_ofz + idx;
			const size_t wunit = sectr / WS_MIN;
			const size_t rnd = wunit / nchunks;
			const size_t chunk = wunit % nchunks;
			const size_t chunk_sectr = sectr % WS_MIN + rnd * WS_MIN;

			addrs_src[idx].val = src->blks[chunk].val;
			addrs_src[idx].l.sectr = chunk_sectr;
			addrs_dst[idx].val = dst->blks[chunk].val;
			addrs_dst[idx].l.sectr = chunk_sectr;
		}

		const ssize_t err = nvm_cmd_copy(src->dev, addrs_src,
						 addrs_dst, cmd_nsectr,
						 NVM_VBLK_CMD_OPTS, &ret);
		if (err)
			++nerr;
	}

	if (nerr) {
		// BUGFIX: message previously named nvm_cmd_write, but the
		// failing command here is nvm_cmd_copy
		NVM_DEBUG("FAILED: nvm_cmd_copy, nerr(%zu)", nerr);
		errno = EIO;
		return -1;
	}

	return count;
}
/* Copy src to dst. Only defined for identically shaped vblks on the same
 * device; currently implemented for 2.0-spec devices only.
 * Returns bytes copied, or -1 with errno=ENOSYS for every unsupported
 * combination. */
ssize_t nvm_vblk_copy(struct nvm_vblk *src, struct nvm_vblk *dst,
		      int NVM_UNUSED(flags))
{
	const int verid = nvm_dev_get_verid(nvm_vblk_get_dev(src));

	if (src->dev != dst->dev) {
		NVM_DEBUG("FAILED: unsupported cross device copy");
		errno = ENOSYS;
		return -1;
	}
	if ((src->nblks != dst->nblks) || (src->nbytes != dst->nbytes)) {
		NVM_DEBUG("FAILED: unbalanced vblks");
		errno = ENOSYS;
		return -1;
	}

	if (verid == NVM_SPEC_VERID_20)
		return vblk_copy_s20(src, dst);

	if (verid == NVM_SPEC_VERID_12)
		NVM_DEBUG("FAILED: not implemented, verid: %d", verid);
	else
		NVM_DEBUG("FAILED: unsupported, verid: %d", verid);

	errno = ENOSYS;
	return -1;
}
/* Accessor: pointer to the vblk's internal address array (not a copy). */
struct nvm_addr *nvm_vblk_get_addrs(struct nvm_vblk *vblk)
{
return vblk->blks;
}
/* Accessor: number of addresses (blocks/chunks) spanned by the vblk. */
int nvm_vblk_get_naddrs(struct nvm_vblk *vblk)
{
return vblk->nblks;
}
/* Accessor: total byte capacity of the vblk. */
size_t nvm_vblk_get_nbytes(struct nvm_vblk *vblk)
{
return vblk->nbytes;
}
/* Accessor: current read-cursor position in bytes. */
size_t nvm_vblk_get_pos_read(struct nvm_vblk *vblk)
{
return vblk->pos_read;
}
/* Accessor: current write-cursor position in bytes. */
size_t nvm_vblk_get_pos_write(struct nvm_vblk *vblk)
{
return vblk->pos_write;
}
/* Accessor: device handle backing the vblk; NULL-safe (returns NULL for a
 * NULL vblk). */
struct nvm_dev *nvm_vblk_get_dev(struct nvm_vblk *vblk)
{
if (!vblk)
return NULL;
return vblk->dev;
}
/* Set the read-cursor; positions beyond the vblk capacity are rejected
 * with EINVAL. Returns 0 on success, -1 on error. */
int nvm_vblk_set_pos_read(struct nvm_vblk *vblk, size_t pos)
{
	if (pos <= vblk->nbytes) {
		vblk->pos_read = pos;
		return 0;
	}

	errno = EINVAL;
	return -1;
}
/* Set the write-cursor; positions beyond the vblk capacity are rejected
 * with EINVAL. Returns 0 on success, -1 on error. */
int nvm_vblk_set_pos_write(struct nvm_vblk *vblk, size_t pos)
{
	if (pos <= vblk->nbytes) {
		vblk->pos_write = pos;
		return 0;
	}

	errno = EINVAL;
	return -1;
}
/* Print a human-readable dump of the vblk state and its addresses. */
void nvm_vblk_pr(struct nvm_vblk *vblk)
{
	printf("vblk:\n");
	printf("  dev: {pmode: '%s'}\n", nvm_pmode_str(nvm_dev_get_pmode(vblk->dev)));
	printf("  nblks: %"PRIi32"\n", vblk->nblks);
	printf("  nmbytes: %zu\n", vblk->nbytes >> 20);
	printf("  pos_write: %zu\n", vblk->pos_write);
	printf("  pos_read: %zu\n", vblk->pos_read);
	// BUGFIX: format was "0x08%x" which printed a literal "08" before the
	// hex digits; "0x%08x" zero-pads the value to eight hex digits
	printf("  flags: 0x%08x\n", vblk->flags);
	nvm_addr_prn(vblk->blks, vblk->nblks, vblk->dev);
}
|
weighted_sptree.h | /*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
/*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WEIGHTED_SPTREE_H
#define WEIGHTED_SPTREE_H
#include <iostream>
#include <vector>
#include <unordered_map>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
namespace hdi{
namespace dr{
//! Sparse Partitioning Tree used for the Barnes Hut approximation
/*!
Sparse Partitioning Tree used for the Barnes Hut approximation.
The original version was implemented by Laurens van der Maaten,
\author Laurens van der Maaten
\author Nicola Pezzotti
*/
// Sparse space-partitioning tree over an embedding, with per-point weights,
// used for the Barnes-Hut force approximation. Declaration only; most member
// definitions live elsewhere.
template <typename scalar_type>
class WeightedSPTree{
public:
typedef double hp_scalar_type;
private:
// Axis-aligned cell: corner + per-dimension width, in embedding space
class Cell {
unsigned int _emb_dimension;
hp_scalar_type* corner;
hp_scalar_type* width;
public:
Cell(unsigned int emb_dimension);
Cell(unsigned int emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
~Cell();
hp_scalar_type getCorner(unsigned int d);
hp_scalar_type getWidth(unsigned int d);
void setCorner(unsigned int d, hp_scalar_type val);
void setWidth(unsigned int d, hp_scalar_type val);
bool containsPoint(scalar_type point[]);
};
// Fixed constants
static const unsigned int QT_NODE_CAPACITY = 1;
// A buffer we use when doing force computations
//hp_scalar_type* buff;
// Properties of this node in the tree
WeightedSPTree* parent;
unsigned int _emb_dimension;
bool is_leaf;
unsigned int size;
hp_scalar_type cum_size;
// Axis-aligned bounding box stored as a center with half-_emb_dimensions to represent the boundaries of this quad tree
Cell* boundary;
// Indices in this space-partitioning tree node, corresponding center-of-mass, and list of all children
scalar_type* _emb_positions;
const scalar_type* _weights;
hp_scalar_type* _center_of_mass;
unsigned int index[QT_NODE_CAPACITY];
// Children
WeightedSPTree** children;
unsigned int no_children;
public:
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N);
private:
// Internal constructors used during subdivision; not part of the public API
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
public:
~WeightedSPTree();
void setData(scalar_type* inp_data, const scalar_type* weights);
WeightedSPTree* getParent();
void construct(Cell boundary);
bool insert(unsigned int new_index);
void subdivide();
bool isCorrect();
void rebuildTree();
void getAllIndices(unsigned int* indices);
unsigned int getDepth();
void computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const;
template <class sparse_scalar_matrix_type>
void computeEdgeForces(const sparse_scalar_matrix_type& matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const;
void print();
private:
void init(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
void fill(unsigned int N);
unsigned int getAllIndices(unsigned int* indices, unsigned int loc);
bool isChild(unsigned int test_index, unsigned int start, unsigned int end);
};
/////////////////////////////////////////////////////////////////
/* Accumulate attractive (edge) forces into pos_f for every edge of the
 * sparse similarity matrix, parallelized over rows via GCD or OpenMP.
 * For each edge (j, elem.first) with weight p_ij, adds
 * p_ij * multiplier / (1 + ||y_j - y_k||^2) / n times the displacement.
 * NOTE(review): 'multiplier' appears both in 'res' and again in the final
 * accumulation (res * buff[d] * multiplier) — it is applied twice; confirm
 * against the reference Barnes-Hut implementation whether that is intended. */
template <typename scalar_type>
template <class sparse_scalar_matrix_type>
void WeightedSPTree<scalar_type>::computeEdgeForces(const sparse_scalar_matrix_type& sparse_matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const{
const int n = sparse_matrix.size();
// Loop over all edges in the graph
#ifdef __USE_GCD__
std::cout << "GCD dispatch, weighted_sptree 176.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
// Per-row scratch: displacement vector between the two embeddings
std::vector<hp_scalar_type> buff(_emb_dimension,0);
unsigned int ind1, ind2;
hp_scalar_type q_ij_1;
ind1 = j * _emb_dimension;
for(auto elem: sparse_matrix[j]) {
// Compute pairwise distance and Q-value
q_ij_1 = 1.0;
ind2 = elem.first * _emb_dimension;
for(unsigned int d = 0; d < _emb_dimension; d++)
buff[d] = _emb_positions[ind1 + d] - _emb_positions[ind2 + d]; //buff contains (yi-yj) per each _emb_dimension
for(unsigned int d = 0; d < _emb_dimension; d++)
q_ij_1 += buff[d] * buff[d];
hp_scalar_type p_ij = elem.second;
hp_scalar_type res = hp_scalar_type(p_ij) * multiplier / q_ij_1 / n;
// Sum positive force
for(unsigned int d = 0; d < _emb_dimension; d++)
pos_f[ind1 + d] += res * buff[d] * multiplier; //(p_ij*q_j*mult) * (yi-yj)
}
}
#ifdef __USE_GCD__
);
#endif
}
}
}
#endif
|
jacobi-task.c | # include "poisson.h"
/* #pragma omp task/taskwait version of SWEEP. */
/* #pragma omp task/taskwait version of SWEEP. */
/* One Jacobi sweep driver for the 2-D Poisson problem on an nx-by-ny grid.
 * For iterations itold+1..itnew: copies unew into u (one task per row),
 * then computes the next estimate into unew (one task per row), with a
 * taskwait barrier between the copy and compute phases and after each
 * iteration. Boundary cells take the source value f directly; interior
 * cells use the 4-point Jacobi stencil.
 * NOTE(review): 'block_size' is accepted but unused in this variant. */
void sweep (int nx, int ny, double dx, double dy, double *f_,
int itold, int itnew, double *u_, double *unew_, int block_size)
{
int i;
int it;
int j;
/* Reinterpret the flat arrays as nx-by-ny matrices */
double (*f)[nx][ny] = (double (*)[nx][ny])f_;
double (*u)[nx][ny] = (double (*)[nx][ny])u_;
double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;
/* Single thread generates tasks; the team executes them */
#pragma omp parallel shared (f, u, unew) private (i, it, j) firstprivate(nx, ny, dx, dy, itold, itnew)
#pragma omp single
{
for (it = itold + 1; it <= itnew; it++) {
/* Phase 1: save the current estimate (u <- unew), one task per row */
for (i = 0; i < nx; i++) {
#pragma omp task firstprivate(i, ny) private(j) shared(u, unew)
for (j = 0; j < ny; j++) {
(*u)[i][j] = (*unew)[i][j];
}
}
/* Barrier: all copies must finish before the stencil reads u */
#pragma omp taskwait
// Compute a new estimate.
for (i = 0; i < nx; i++) {
#pragma omp task firstprivate(i, dx, dy, nx, ny) private(j) shared(u, unew, f)
for (j = 0; j < ny; j++) {
if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) {
/* Dirichlet-style boundary: take the source term directly */
(*unew)[i][j] = (*f)[i][j];
} else {
/* 4-point Jacobi stencil with the scaled source term */
(*unew)[i][j] = 0.25 * ( (*u)[i-1][j ] + (*u)[i][j+1]
+ (*u)[i ][j-1] + (*u)[i+1][j]
+ (*f)[i ][j ] * dx * dy);
}
}
}
/* Barrier before the next iteration's copy phase */
#pragma omp taskwait
}
}
}
|
hamming.c | /*
Copyright © INRIA 2010-2011.
Authors: Matthijs Douze & Herve Jegou
Contact: matthijs.douze@inria.fr herve.jegou@inria.fr
This software is a computer program whose purpose is to provide
efficient tools for basic yet computationally demanding tasks,
such as find k-nearest neighbors using exhaustive search
and kmeans clustering.
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
/* This code was written by Herve Jegou. Contact: herve.jegou@inria.fr */
/* Last change: June 1st, 2010 */
/* This software is governed by the CeCILL license under French law and */
/* abiding by the rules of distribution of free software. */
/* See http://www.cecill.info/licences.en.html */
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "hamming.h"
/* If SSE4.2 is available, use the specific processor instructions */
#ifdef __SSE4_2__
#include <nmmintrin.h>
#define hamming_32(pa,pb) _mm_popcnt_u32((*((const uint32 *) (pa)) ^ *((const uint32 *) (pb))))
#define hamming_64(pa,pb) _mm_popcnt_u64((*((const uint64 *) (pa)) ^ *((const uint64 *) (pb))))
#endif
#define hamming_128(a,b) (hamming_64((const uint64 *) (a),(const uint64 *) (b))+hamming_64(((const uint64 *) (a)) + 1, ((const uint64 *) (b)) + 1))
#define MIN(a,b) ((a)>(b) ? (b) : (a))
/* Define the Hamming distance by selecting the most appropriate function,
using the generic version as a backup */
/* the slice size is set to avoid testing the buffer size too often */
#define HAMMATCH_SLICESIZE 16
/* For functions that compute distances by blocks */
#define HAM_BLOCKSIZE 128
/* geometric re-allocation: add a constant size plus a relative 50% of additional memory */
#define HAMMATCH_REALLOC_NEWSIZE(oldsize) (HAMMATCH_SLICESIZE+((oldsize * 5) / 4))
/* Lookup table: uint8_nbones[v] is the number of set bits (popcount) of the
   byte v.  Used by the portable, non-SSE Hamming-distance routines below. */
static uint16 uint8_nbones[256] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
/*-------------------------------------------------------*/
/* Elementary Hamming distance computation: unoptimized */
/* Generic Hamming distance between two binary codes of ncodes bytes,
   computed byte by byte through the popcount lookup table. */
uint16 hamming (const uint8 *bs1, const uint8 * bs2, int ncodes)
{
  uint16 total = 0;
  int k;
  for (k = 0; k < ncodes; k++)
    total += uint8_nbones[bs1[k] ^ bs2[k]];
  return total;
}
#ifndef __SSE4_2__
#warning "SSE4.2 NOT USED FOR HAMMING DISTANCE COMPUTATION. Consider adding -msse4!"
/* Portable fallback: popcount of the XOR of two 32-bit codes,
   one table lookup per byte. */
static uint16 hamming_32 (const uint32 * bs1, const uint32 * bs2)
{
  uint32 diff = (*bs1) ^ (*bs2);
  uint16 ham = 0;
  int byte;
  for (byte = 0; byte < 4; byte++) {
    ham += uint8_nbones[diff & 255];
    diff >>= 8;
  }
  return ham;
}
/* Portable fallback: popcount of the XOR of two 64-bit codes,
   one table lookup per byte. */
static uint16 hamming_64 (const uint64 * bs1, const uint64 * bs2)
{
  uint64 diff = (*bs1) ^ (*bs2);
  uint16 ham = 0;
  int byte;
  for (byte = 0; byte < 8; byte++) {
    ham += uint8_nbones[diff & 255];
    diff >>= 8;
  }
  return ham;
}
#endif
/*-------------------------------------------------------*/
/* Compute a set of Hamming distances */
/* All-pairs Hamming distances for 32-bit codes: dis[j*na + i] is the
   distance between a[i] and b[j] (row-major, one row per b code). */
static void compute_hamming_32 (uint16 * dis, const uint32 * a, const uint32 * b, int na, int nb)
{
  int i, j;
  for (j = 0; j < nb; j++)
    for (i = 0; i < na; i++)
      dis[(size_t) j * na + i] = hamming_32 (a + i, b + j);
}
/* All-pairs Hamming distances for 64-bit codes: dis[j*na + i] is the
   distance between a[i] and b[j]. */
static void compute_hamming_64 (uint16 * dis, const uint64 * a, const uint64 * b, int na, int nb)
{
  int i, j;
  for (j = 0; j < nb; j++)
    for (i = 0; i < na; i++)
      dis[(size_t) j * na + i] = hamming_64 (a + i, b + j);
}
/* All-pairs Hamming distances for 128-bit codes (two uint64 words per code):
   dis[j*na + i] is the distance between a-code i and b-code j. */
static void compute_hamming_128 (uint16 * dis, const uint64 * a, const uint64 * b, int na, int nb)
{
  int i, j;
  for (j = 0; j < nb; j++)
    for (i = 0; i < na; i++)
      dis[(size_t) j * na + i] = hamming_128 (a + 2 * (size_t) i, b + 2 * (size_t) j);
}
/* Dispatch all-pairs Hamming distance computation on the code size,
   falling back to the generic byte-wise routine for unusual lengths.
   dis is row-major: one row of na distances per b code. */
void compute_hamming (uint16 * dis, const uint8 * a, const uint8 * b,
                      int na, int nb, int ncodes)
{
  switch (ncodes) {
    case 4:  compute_hamming_32 (dis, (const uint32 *) a, (const uint32 *) b, na, nb); return;
    case 8:  compute_hamming_64 (dis, (const uint64 *) a, (const uint64 *) b, na, nb); return;
    case 16: compute_hamming_128 (dis, (const uint64 *) a, (const uint64 *) b, na, nb); return;
    default: fprintf (stderr, "# Warning: non-optimized version of compute_hamming\n");
  }
  /* generic fallback */
  int i, j;
  for (j = 0; j < nb; j++)
    for (i = 0; i < na; i++)
      dis[(size_t) j * na + i] =
          hamming (a + (size_t) i * ncodes, b + (size_t) j * ncodes, ncodes);
}
/*-------------------------------------------------------*/
/* Count number of matches given a threshold */
/* Count pairs (i, j) of 32-bit codes whose Hamming distance is <= ht.
   The count is written to *nptr. */
static void match_hamming_count_32 (const uint32 * bs1, const uint32 * bs2, int n1, int n2, int ht, size_t * nptr)
{
  size_t i, j, total = 0;
  for (i = 0; i < (size_t) n1; i++)
    for (j = 0; j < (size_t) n2; j++)
      if (hamming_32 (bs1 + i, bs2 + j) <= ht)
        total++;
  *nptr = total;
}
/* Count pairs (i, j) of 64-bit codes whose Hamming distance is <= ht.
   The count is written to *nptr.
   Cleaned up: the original kept three pointer cursors and contained a dead
   store (`bs1 += 1` immediately overwritten by `bs1 = bs1_ + i` at the top of
   the outer loop); plain indexing expresses the same traversal. */
static void match_hamming_count_64 (const uint64 * bs1, const uint64 * bs2, int n1, int n2, int ht, size_t * nptr)
{
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n1; i++)
    for (j = 0; j < (size_t) n2; j++)
      /* collect the match only if this satisfies the threshold */
      if (hamming_64 (bs1 + i, bs2 + j) <= ht)
        posm++;
  *nptr = posm;
}
/* Count pairs (i, j) of 128-bit codes (two uint64 words per code) whose
   Hamming distance is <= ht.  The count is written to *nptr. */
static void match_hamming_count_128 (const uint64 * bs1, const uint64 * bs2, int n1, int n2, int ht, size_t * nptr)
{
  size_t i, j, total = 0;
  for (i = 0; i < (size_t) n1; i++)
    for (j = 0; j < (size_t) n2; j++)
      if (hamming_128 (bs1 + 2 * i, bs2 + 2 * j) <= ht)
        total++;
  *nptr = total;
}
/* Count pairs with Hamming distance <= ht, dispatching on the code size and
   falling back to the generic byte-wise distance.  Count goes to *nptr. */
void match_hamming_count (const uint8 * bs1, const uint8 * bs2, int n1, int n2, int ht, int ncodes, size_t * nptr)
{
  switch (ncodes) {
    case 4:  match_hamming_count_32 ((const uint32 *) bs1, (const uint32 *) bs2, n1, n2, ht, nptr); return;
    case 8:  match_hamming_count_64 ((const uint64 *) bs1, (const uint64 *) bs2, n1, n2, ht, nptr); return;
    case 16: match_hamming_count_128 ((const uint64 *) bs1, (const uint64 *) bs2, n1, n2, ht, nptr); return;
    default: fprintf (stderr, "# Warning: non-optimized version of match_hamming_count\n");
  }
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n1; i++)
    for (j = 0; j < (size_t) n2; j++)
      /* collect the match only if this satisfies the threshold */
      if (hamming (bs1 + i * ncodes, bs2 + j * ncodes, ncodes) <= ht)
        posm++;
  *nptr = posm;
}
/* Count number of cross-matches given a threshold */
/* Count unordered pairs i < j within one set of 32-bit codes whose Hamming
   distance is <= ht.  The count is written to *nptr. */
static void crossmatch_hamming_count_32 (const uint32 * dbs, int n, int ht, size_t * nptr)
{
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n; i++)
    for (j = i + 1; j < (size_t) n; j++)
      if (hamming_32 (dbs + i, dbs + j) <= ht)
        posm++;
  *nptr = posm;
}
/* Count unordered pairs i < j within one set of 64-bit codes whose Hamming
   distance is <= ht.  The count is written to *nptr. */
static void crossmatch_hamming_count_64 (const uint64 * dbs, int n, int ht, size_t * nptr)
{
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n; i++)
    for (j = i + 1; j < (size_t) n; j++)
      if (hamming_64 (dbs + i, dbs + j) <= ht)
        posm++;
  *nptr = posm;
}
/* Count unordered pairs i < j within one set of 128-bit codes (two uint64
   words each) whose Hamming distance is <= ht.  The count goes to *nptr. */
static void crossmatch_hamming_count_128 (const uint64 * dbs, int n, int ht, size_t * nptr)
{
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n; i++)
    for (j = i + 1; j < (size_t) n; j++)
      if (hamming_128 (dbs + 2 * i, dbs + 2 * j) <= ht)
        posm++;
  *nptr = posm;
}
/* Count unordered pairs i < j within one code set with distance <= ht,
   dispatching on the code size; generic byte-wise fallback otherwise. */
void crossmatch_hamming_count (const uint8 * dbs, int n, int ht, int ncodes, size_t * nptr)
{
  switch (ncodes) {
    case 4:  crossmatch_hamming_count_32 ((const uint32 *) dbs, n, ht, nptr); return;
    case 8:  crossmatch_hamming_count_64 ((const uint64 *) dbs, n, ht, nptr); return;
    case 16: crossmatch_hamming_count_128 ((const uint64 *) dbs, n, ht, nptr); return;
    default: fprintf (stderr, "# Warning: non-optimized version of crossmatch_hamming_count\n");
  }
  size_t i, j, posm = 0;
  for (i = 0; i < (size_t) n; i++)
    for (j = i + 1; j < (size_t) n; j++)
      /* collect the match only if this satisfies the threshold */
      if (hamming (dbs + i * ncodes, dbs + j * ncodes, ncodes) <= ht)
        posm++;
  *nptr = posm;
}
/*-------------------------------------------------------*/
/* Return all matches given a threshold */
/* Compute hamming distance and report those below a given threshold in a structure array */
/* Allocate an array of n match records; caller owns and frees it.
   Idiom fixes: no malloc cast in C, and the size is computed in size_t so a
   large n cannot overflow int arithmetic.  May return NULL on OOM (callers
   assert on the result). */
hammatch_t * hammatch_new (int n)
{
  return malloc ((size_t) n * sizeof (hammatch_t));
}
/* Grow (or shrink) a match array to n records; returns the new pointer, or
   NULL on failure (in which case m is still valid — callers must not lose it).
   Idiom fixes: no realloc cast in C; size computed in size_t. */
hammatch_t * hammatch_realloc (hammatch_t * m, int n)
{
  return realloc (m, (size_t) n * sizeof (hammatch_t));
}
/* Collect all (query i, base j) pairs of 32-bit codes with Hamming distance
   <= ht.  Allocates *hmptr (initially bufsize records, grown geometrically),
   caller frees; the number of matches is written to *nptr. */
static void match_hamming_thres_32 (const uint32 * bs1, const uint32 * bs2, int n1, int n2, int ht,
size_t bufsize, hammatch_t ** hmptr, size_t * nptr)
{
size_t i, j, posm = 0;
uint16 h;
*hmptr = hammatch_new (bufsize);
hammatch_t * hm = *hmptr;
const uint32 * bs2_ = bs2;
for (i = 0 ; i < n1 ; i++) {
bs2 = bs2_; /* rewind the base list for each query */
for (j = 0 ; j < n2 ; j++) {
h = hamming_32 (bs1, bs2);
if (h <= ht) { /* Enough space to store another match ? */
if (posm >= bufsize) {
bufsize = HAMMATCH_REALLOC_NEWSIZE (bufsize);
*hmptr = hammatch_realloc (*hmptr, bufsize);
assert (*hmptr != NULL);
hm = (*hmptr) + posm; /* re-base cursor: realloc may have moved the buffer */
}
hm->qid = i;
hm->bid = j;
hm->score = h;
hm++;
posm++;
}
bs2++; /* next signature */
}
bs1++;
}
*nptr = posm;
}
/* Collect all (query i, base j) pairs of 64-bit codes with Hamming distance
   <= ht.  Allocates *hmptr (initially bufsize records, grown geometrically),
   caller frees; the number of matches is written to *nptr. */
static void match_hamming_thres_64 (const uint64 * bs1, const uint64 * bs2, int n1, int n2, int ht,
size_t bufsize, hammatch_t ** hmptr, size_t * nptr)
{
size_t i, j, posm = 0;
uint16 h;
*hmptr = hammatch_new (bufsize);
hammatch_t * hm = *hmptr;
const uint64 * bs2_ = bs2;
for (i = 0 ; i < n1 ; i++) {
bs2 = bs2_; /* rewind the base list for each query */
for (j = 0 ; j < n2 ; j++) {
h = hamming_64 (bs1, bs2);
if (h <= ht) { /* Enough space to store another match ? */
if (posm >= bufsize) {
bufsize = HAMMATCH_REALLOC_NEWSIZE (bufsize);
*hmptr = hammatch_realloc (*hmptr, bufsize);
assert (*hmptr != NULL);
hm = (*hmptr) + posm; /* re-base cursor: realloc may have moved the buffer */
}
hm->qid = i;
hm->bid = j;
hm->score = h;
hm++;
posm++;
}
bs2++; /* next signature */
}
bs1++;
}
*nptr = posm;
}
/* Collect all (query i, base j) pairs of 128-bit codes (two uint64 words per
   code, hence the stride-2 cursors) with Hamming distance <= ht.  Allocates
   *hmptr (grown geometrically), caller frees; match count goes to *nptr. */
static void match_hamming_thres_128 (const uint64 * bs1, const uint64 * bs2, int n1, int n2, int ht,
size_t bufsize, hammatch_t ** hmptr, size_t * nptr)
{
size_t i, j, posm = 0;
uint16 h;
*hmptr = hammatch_new (bufsize);
hammatch_t * hm = *hmptr;
const uint64 * bs2_ = bs2;
for (i = 0 ; i < n1 ; i++) {
bs2 = bs2_; /* rewind the base list for each query */
for (j = 0 ; j < n2 ; j++) {
h = hamming_128 (bs1, bs2);
if (h <= ht) { /* Enough space to store another match ? */
if (posm >= bufsize) {
bufsize = HAMMATCH_REALLOC_NEWSIZE (bufsize);
*hmptr = hammatch_realloc (*hmptr, bufsize);
assert (*hmptr != NULL);
hm = (*hmptr) + posm; /* re-base cursor: realloc may have moved the buffer */
}
hm->qid = i;
hm->bid = j;
hm->score = h;
hm++;
posm++;
}
bs2 += 2; /* next signature */
}
bs1 += 2;
}
*nptr = posm;
}
/* Collect all (query, base) matches with Hamming distance <= ht, dispatching
   on the code size; generic byte-wise fallback otherwise.  Allocates *hmptr
   (initially bufsize records, grown geometrically), caller frees; the number
   of matches is written to *nptr. */
void match_hamming_thres (const uint8 * bs1, const uint8 * bs2,
int n1, int n2, int ht, int ncodes, size_t bufsize,
hammatch_t ** hmptr, size_t * nptr)
{
switch (ncodes) {
case 4: match_hamming_thres_32 ((const uint32 *) bs1, (const uint32 *) bs2, n1, n2, ht, bufsize, hmptr, nptr); return;
case 8: match_hamming_thres_64 ((const uint64 *) bs1, (const uint64 *) bs2, n1, n2, ht, bufsize, hmptr, nptr); return;
case 16: match_hamming_thres_128 ((const uint64 *) bs1, (const uint64 *) bs2, n1, n2, ht, bufsize, hmptr, nptr); return;
default: fprintf (stderr, "# Warning: non-optimized version of match_hamming_thres\n");
}
size_t i, j, posm = 0;
uint16 h;
*hmptr = hammatch_new (bufsize);
hammatch_t * hm = *hmptr;
const uint8 * bs2_ = bs2;
for (i = 0 ; i < n1 ; i++) {
bs2 = bs2_; /* rewind the base list for each query */
for (j = 0 ; j < n2 ; j++) {
/* Here perform the real work of computing the distance */
h = hamming (bs1, bs2, ncodes);
/* collect the match only if this satisfies the threshold */
if (h <= ht) {
/* Enough space to store another match ? */
if (posm >= bufsize) {
bufsize = HAMMATCH_REALLOC_NEWSIZE (bufsize);
*hmptr = hammatch_realloc (*hmptr, bufsize);
assert (*hmptr != NULL);
hm = (*hmptr) + posm; /* re-base cursor: realloc may have moved the buffer */
}
hm->qid = i;
hm->bid = j;
hm->score = h;
hm++;
posm++;
}
bs2 += ncodes; /* next signature */
}
bs1 += ncodes;
}
*nptr = posm;
}
/* Collect matches (distance <= ht) between 32-bit code sets into
   caller-preallocated arrays: idx receives (i, j) pairs (2 ints per match),
   hams the distances.  Returns the number of matches written. */
static size_t match_hamming_thres_prealloc_32 (const uint32 * bs1,
                                               const uint32 * bs2,
                                               int n1, int n2, int ht,
                                               int * idx, uint16 * hams)
{
  size_t i, j, nmatch = 0;
  for (i = 0; i < (size_t) n1; i++) {
    for (j = 0; j < (size_t) n2; j++) {
      uint16 h = hamming_32 (bs1 + i, bs2 + j);
      if (h <= ht) {
        idx[0] = i;
        idx[1] = j;
        idx += 2;
        *hams++ = h;
        nmatch++;
      }
    }
  }
  return nmatch;
}
/* Collect matches (distance <= ht) between 64-bit code sets into
   caller-preallocated arrays: idx receives (i, j) pairs (2 ints per match),
   hams the distances.  Returns the number of matches written. */
static size_t match_hamming_thres_prealloc_64 (const uint64 * bs1,
                                               const uint64 * bs2,
                                               int n1, int n2, const int ht,
                                               int * idx, uint16 * hams)
{
  size_t i, j, nmatch = 0;
  for (i = 0; i < (size_t) n1; i++) {
    for (j = 0; j < (size_t) n2; j++) {
      uint16 h = hamming_64 (bs1 + i, bs2 + j);
      if (h <= ht) {
        idx[0] = i;
        idx[1] = j;
        idx += 2;
        *hams++ = h;
        nmatch++;
      }
    }
  }
  return nmatch;
}
#ifdef NOTDEFINED
/* Blocked version -> not faster, not used */
/* NOTE: dead code — compiled out unless NOTDEFINED is defined.  Kept for
   reference: tiles the n1 x n2 comparison into HAM_BLOCKSIZE squares in an
   attempt to improve cache locality. */
static size_t match_hamming_thres_prealloc_64 (const uint64 * bs1,
const uint64 * bs2,
const int n1, const int n2, const int ht,
int * idx, uint16 * hams)
{
size_t i, j, posm = 0, bli, blj;
uint16 h;
const uint64 * bs1_ = bs1;
const uint64 * bs2_ = bs2;
for (bli = 0 ; bli < n1 ; bli += HAM_BLOCKSIZE) {
const size_t bli_end = MIN(bli+HAM_BLOCKSIZE, n1);
for (blj = 0 ; blj < n2 ; blj += HAM_BLOCKSIZE) {
const size_t blj_end = MIN(blj+HAM_BLOCKSIZE, n2);
for (i = bli ; i < bli_end ; i++) {
bs1 = bs1_ + i;
bs2 = bs2_ + blj;
for (j = blj ; j < blj_end ; j++) {
/* Here perform the real work of computing the distance */
h = hamming_64 (bs1, bs2);
/* collect the match only if this satisfies the threshold */
if (h <= ht) {
/* Enough space to store another match ? */
*idx = i; idx++;
*idx = j; idx++;
*hams = h;
hams++;
posm++;
}
bs2++; /* next signature */
}
bs1++;
}
}
}
return posm;
}
#endif
/* Collect matches (distance <= ht) between 128-bit code sets (two uint64
   words per code) into caller-preallocated arrays: idx receives (i, j)
   pairs, hams the distances.  Returns the number of matches written. */
static size_t match_hamming_thres_prealloc_128 (const uint64 * bs1,
                                                const uint64 * bs2,
                                                int n1, int n2, int ht,
                                                int * idx, uint16 * hams)
{
  size_t i, j, nmatch = 0;
  for (i = 0; i < (size_t) n1; i++) {
    for (j = 0; j < (size_t) n2; j++) {
      uint16 h = hamming_128 (bs1 + 2 * i, bs2 + 2 * j);
      if (h <= ht) {
        idx[0] = i;
        idx[1] = j;
        idx += 2;
        *hams++ = h;
        nmatch++;
      }
    }
  }
  return nmatch;
}
/* Collect matches (distance <= ht) into caller-preallocated idx/hams arrays,
   dispatching on code size; generic byte-wise fallback otherwise.  idx gets
   (i, j) pairs (2 ints per match), hams the distances.  Returns the match
   count. */
size_t match_hamming_thres_prealloc (const uint8 * bs1, const uint8 * bs2,
                                     int n1, int n2, int ht, int ncodes,
                                     int * idx, uint16 * hams)
{
  switch (ncodes) {
    case 4:  return match_hamming_thres_prealloc_32 ((const uint32 *) bs1,
                      (const uint32 *) bs2, n1, n2, ht, idx, hams);
    case 8:  return match_hamming_thres_prealloc_64 ((const uint64 *) bs1,
                      (const uint64 *) bs2, n1, n2, ht, idx, hams);
    case 16: return match_hamming_thres_prealloc_128 ((const uint64 *) bs1,
                      (const uint64 *) bs2, n1, n2, ht, idx, hams);
    default: fprintf (stderr, "# Warning: non-optimized version of match_hamming_thres\n");
  }
  size_t i, j, nmatch = 0;
  for (i = 0; i < (size_t) n1; i++) {
    for (j = 0; j < (size_t) n2; j++) {
      uint16 h = hamming (bs1 + i * ncodes, bs2 + j * ncodes, ncodes);
      if (h <= ht) {
        idx[0] = i;
        idx[1] = j;
        idx += 2;
        *hams++ = h;
        nmatch++;
      }
    }
  }
  return nmatch;
}
/* Collect all unordered pairs i < j within one code set whose Hamming
   distance is <= ht.  Allocates *hmptr (initially bufsize records, grown
   geometrically), caller frees; the match count is written to *nptr. */
void crossmatch_hamming (const uint8 * dbs, long n, int ht, int ncodes,
long bufsize, hammatch_t ** hmptr, size_t * nptr)
{
size_t i, j, posm = 0;
uint16 h;
*hmptr = hammatch_new (bufsize);
hammatch_t * hm = *hmptr;
const uint8 * bs1 = dbs;
for (i = 0 ; i < n ; i++) {
const uint8 * bs2 = bs1 + ncodes; /* start at signature i+1: upper triangle only */
for (j = i + 1 ; j < n ; j++) {
/* Here perform the real work of computing the distance */
h = hamming (bs1, bs2, ncodes);
/* collect the match only if this satisfies the threshold */
if (h <= ht) {
/* Enough space to store another match ? */
if (posm >= bufsize) {
bufsize = HAMMATCH_REALLOC_NEWSIZE (bufsize);
*hmptr = hammatch_realloc (*hmptr, bufsize);
assert (*hmptr != NULL);
hm = (*hmptr) + posm; /* re-base cursor: realloc may have moved the buffer */
}
hm->qid = i;
hm->bid = j;
hm->score = h;
hm++;
posm++;
}
bs2 += ncodes;
}
bs1 += ncodes; /* next signature */
}
*nptr = posm;
}
/* Collect all unordered pairs i < j within one code set whose Hamming
   distance is <= ht, into caller-preallocated arrays: idx gets (i, j)
   pairs (2 ints per match), hams the distances.  Returns the match count. */
size_t crossmatch_hamming_prealloc (const uint8 * dbs, long n, int ht,
                                    int ncodes, int * idx, uint16 * hams)
{
  size_t i, j, nmatch = 0;
  for (i = 0; i < (size_t) n; i++) {
    for (j = i + 1; j < (size_t) n; j++) {
      uint16 h = hamming (dbs + i * ncodes, dbs + j * ncodes, ncodes);
      if (h <= ht) {
        idx[0] = i;
        idx[1] = j;
        idx += 2;
        *hams++ = h;
        nmatch++;
      }
    }
  }
  return nmatch;
}
/*-------------------------------------------*/
/* Threaded versions, if OpenMP is available */
#ifdef _OPENMP
#define HAMBLOCK 128
#define MIN(a,b) ((a)<(b) ? (a) : (b))
/* OpenMP-parallel all-pairs Hamming distances: dis[j*na + i] is the distance
   between a-code i and b-code j; rows (b codes) are distributed over threads. */
void compute_hamming_thread (uint16 * dis, const uint8 * a, const uint8 * b,
                             int na, int nb, int ncodes)
{
  size_t i, j;
#pragma omp parallel shared (dis, a, b, na, nb) private (i, j)
  {
#pragma omp for
    for (j = 0 ; j < nb ; j++) {
      const uint8 * pb = b + j * ncodes;
      uint16 * row = dis + j * na;
      for (i = 0 ; i < na ; i++)
        row[i] = hamming (a + i * ncodes, pb, ncodes);
    }
  }
}
/* Threaded thresholded matching: tile the n1 x n2 comparison into
   HAMBLOCK x HAMBLOCK blocks, count matches per tile in parallel, prefix-sum
   the counts to obtain per-tile output offsets, then fill the output arrays
   in a second parallel pass.  *keys (2 ints per match, tile-local indices)
   and *ham are allocated here and owned by the caller.  Returns the total
   number of matches.  nt is unused (thread count comes from OpenMP). */
size_t match_hamming_thres_nt (const uint8 * bs1, const uint8 * bs2, int n1, int n2,
                               int ht, int ncodes, int nt, int ** keys, uint16 ** ham)
{
  size_t bl, nmatches;
  const int nblock1 = 1 + (n1 - 1) / HAMBLOCK;
  const int nblock2 = 1 + (n2 - 1) / HAMBLOCK;
  const int nblock = nblock1 * nblock2;
  size_t * blcount = malloc ((nblock + 1) * sizeof (*blcount));
  assert (blcount != NULL);   /* was unchecked */
  blcount[0] = 0;
#pragma omp parallel for private(bl)
  for (bl = 0 ; bl < nblock ; bl++) {
    /* BUG FIX: the original decomposed bl as bl1 = bl / nblock1 and
       bl2 = bl % nblock1, which swaps the two ranges whenever
       nblock1 != nblock2: bl1 then spans [0, nblock2) against n1, producing
       tile origins past the end of bs1 (and a size_t underflow in n1 - s1).
       Correct decomposition: bl1 in [0, nblock1), bl2 in [0, nblock2). */
    size_t bl1 = bl % nblock1;
    size_t bl2 = bl / nblock1;
    size_t s1 = bl1 * HAMBLOCK;
    size_t s2 = bl2 * HAMBLOCK;
    size_t nb1 = MIN(n1 - s1, HAMBLOCK);
    size_t nb2 = MIN(n2 - s2, HAMBLOCK);
    match_hamming_count (bs1 + s1 * ncodes, bs2 + s2 * ncodes,
                         nb1, nb2, ht, ncodes, blcount + bl + 1);
  }
  /* accumulate counts to determine per-tile output offsets */
  for (bl = 1 ; bl <= nblock ; bl++) {
    if (blcount[bl] > 500)   /* debug trace for unusually dense tiles */
      fprintf (stderr, "bl %zu -> %zu matches (bl-1/cum = %zu)\n",
               bl - 1, blcount[bl], blcount[bl-1]);   /* was %ld for size_t */
    blcount[bl] = blcount[bl-1] + blcount[bl];
  }
  nmatches = blcount[nblock];
  fprintf (stderr, "nmatches = %zu\n", nmatches);   /* was %d: wrong for size_t */
  *keys = malloc (nmatches * 2 * sizeof (**keys));
  *ham = malloc (nmatches * sizeof (**ham));
  assert (*keys != NULL && *ham != NULL);   /* were unchecked */
#pragma omp parallel for private(bl)
  for (bl = 0 ; bl < nblock ; bl++) {
    size_t bl1 = bl % nblock1;   /* same fixed decomposition as above */
    size_t bl2 = bl / nblock1;
    size_t s1 = bl1 * HAMBLOCK;
    size_t s2 = bl2 * HAMBLOCK;
    size_t nb1 = MIN(n1 - s1, HAMBLOCK);
    size_t nb2 = MIN(n2 - s2, HAMBLOCK);
    match_hamming_thres_prealloc (bs1 + s1 * ncodes, bs2 + s2 * ncodes,
                                  nb1, nb2, ht, ncodes,
                                  (int*) (*keys) + blcount[bl] * 2,
                                  (uint16*) (*ham) + blcount[bl]);
  }
  free (blcount);
  return nmatches;
}
#endif /* _OPENMP */
#undef HAM_BLOCKSIZE
|
reduce_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GraphBLAS.h"
#if defined ( _OPENMP )
#include <omp.h>
#endif
// #define N 65536
#define N 16384
// Demo: build an N-by-N GrB_INT64 matrix from generated tuples, then reduce
// it to a scalar with 1..nthreads_max threads, timing each run.
int main (void)
{
#if defined ( _OPENMP )
    double t0 = omp_get_wtime ( ) ;
#endif
    // start GraphBLAS
    GrB_init (GrB_NONBLOCKING) ;
    int nthreads ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads) ;
    printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ;
    int nthreads_max ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads_max) ;
    printf ("# of threads: %d\n", nthreads_max) ;
#if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("GPU warmup time: %g\n", t0) ;
    t0 = omp_get_wtime ( ) ;
#endif
    GrB_Index nrows = N ;
    GrB_Index ncols = N ;
    GrB_Matrix A ;
    GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;
    // These are multi-GB allocations for the default N: check them before
    // touching the memory (the original dereferenced unchecked pointers).
    GrB_Index *I = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    GrB_Index *J = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    int64_t *X = (int64_t *) malloc (nrows * ncols * sizeof (int64_t)) ;
    if (I == NULL || J == NULL || X == NULL)
    {
        fprintf (stderr, "out of memory\n") ;
        free (I) ; free (J) ; free (X) ;
        GrB_Matrix_free (&A) ;
        GrB_finalize ( ) ;
        return (1) ;
    }
    int64_t k ;
#pragma omp parallel for num_threads(nthreads_max) schedule(static)
    for (k = 0 ; k < N*N ; k++)
    {
        // k = i * N + j ;
        int64_t i = k / N ;
        int64_t j = k % N ;
        // int x = (int) (rand ( ) & 0xFF) ;
        int x = (int) (k & 0xFF) ;   // deterministic stand-in for rand()
        I [k] = i ;
        J [k] = j ;
        X [k] = x ;
    }
    GrB_Index nvals = N*N ;
    GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ;
    free (I) ;
    free (J) ;
    free (X) ;
#if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("time to create matrix: %g\n", t0) ;
#endif
    GrB_Index result ;
    double t1 = 0 ;   // initialized: silences maybe-uninitialized when !_OPENMP
    printf ("\nreduce to a scalar:\n") ;
    // loop variable renamed: the original `nthreads` shadowed the outer one
    for (int nth = 1 ; nth <= nthreads_max ; nth++)
    {
        GxB_Global_Option_set (GxB_GLOBAL_NTHREADS, nth) ;
#if defined ( _OPENMP )
        double t = omp_get_wtime ( ) ;
#endif
        GrB_Matrix_reduce_UINT64 (&result, NULL, GrB_PLUS_MONOID_INT64,
            A, NULL) ;
#if defined ( _OPENMP )
        t = omp_get_wtime ( ) - t ;
        if (nth == 1) t1 = t ;
        printf ("nthreads %3d time: %12.6f speedup %8.2f\n",
            nth, t, t1/t) ;
#endif
    }
    // result is a GrB_Index (uint64_t): PRIu64, not PRId64 (format/type UB)
    printf ("result %" PRIu64 "\n", result) ;
    // free everything
    GrB_Matrix_free (&A) ;
    GrB_finalize ( ) ;
    return (0) ;
}
|
CutPursuit.h | #pragma once
#include "Graph.h"
#include <math.h>
#include <queue>
#include <iostream>
#include <fstream>
#include <boost/graph/boykov_kolmogorov_max_flow.hpp>
namespace CP {
// Parameter bundle for the cut-pursuit solver; defaults are set in the
// CutPursuit constructor.
template <typename T>
struct CPparameter
{
T reg_strenth; //regularization strength, multiplies the edge weight (field name keeps its historical spelling)
uint32_t cutoff; //minimal component size; smaller components are merged after the main loop
uint32_t flow_steps; //number of steps in the optimal binary cut computation
uint32_t kmeans_ite; //number of iterations in the kmeans sampling
uint32_t kmeans_resampling; //number of kmeans re-initializations
uint32_t verbose; //verbosity (0 silent, 1 summary, >1 per-iteration trace)
uint32_t max_ite_main; //max number of iterations in the main loop
bool backward_step; //indicates if a backward (merge) step should be performed
double stopping_ratio; //when (E(t-1) - E(t)) / (E(0) - E(t)) is too small, the algorithm stops
fidelityType fidelity; //the fidelity function (L2, linear, KL, SPG)
double smoothing; //smoothing term (for KL divergence only)
bool parallel; //enable/disable parallelism
T weight_decay; //for continued optimization of the flow steps
};
template <typename T>
class CutPursuit
{
public:
Graph<T> main_graph; //the Graph structure containing the main structure
Graph<T> reduced_graph; //the reduced graph whose vertices are the connected component
std::vector<std::vector<VertexDescriptor<T>>> components; //contains the list of the vertices in each component
std::vector<VertexDescriptor<T>> root_vertex; //the root vertex for each connected components
std::vector<bool> saturated_components; //is the component saturated (uncuttable)
std::vector<std::vector<EdgeDescriptor>> borders; //the list of edges forming the borders between the connected components
VertexDescriptor<T> source; //source vertex for graph cut
VertexDescriptor<T> sink; //sink vertex
uint32_t dim; // dimension of the data
uint32_t nVertex; // number of data point
uint32_t nEdge; // number of edges between vertices (not counting the edge to source/sink)
CP::VertexIterator<T> lastIterator; //iterator pointing to the last vertex which is neither sink nor source
CPparameter<T> parameter;
public:
// Build a cut-pursuit problem over nbVertex vertices, starting from a single
// component, and install the default solver parameters.
CutPursuit(uint32_t nbVertex = 1)
{
    // graph structures: one main graph, a one-node reduced graph
    main_graph = Graph<T>(nbVertex);
    reduced_graph = Graph<T>(1);
    // one initial component rooted at vertex 0, not yet saturated
    components = std::vector<std::vector<VertexDescriptor<T>>>(1);
    root_vertex = std::vector<VertexDescriptor<T>>(1, 0);
    saturated_components = std::vector<bool>(1, false);
    source = VertexDescriptor<T>();
    sink = VertexDescriptor<T>();
    dim = 1;
    nVertex = 1;
    nEdge = 0;
    // default solver parameters
    parameter.flow_steps = 3;
    parameter.kmeans_ite = 5;
    parameter.kmeans_resampling = 3;
    parameter.verbose = 2;
    parameter.max_ite_main = 6;
    parameter.backward_step = true;
    parameter.stopping_ratio = 0.0001;
    parameter.fidelity = L2;
    parameter.smoothing = 0.1;
    parameter.parallel = true;
    parameter.weight_decay = 0.7;
}
// Virtual destructor (base class is meant to be subclassed per fidelity);
// all members clean themselves up.
virtual ~CutPursuit(){
};
//=============================================================================================
// Main optimization loop: repeatedly split the components (optimal binary
// partition) and rebuild the reduced graph, stopping on saturation, energy
// stagnation, or the iteration cap; finishes with an optional cutoff pass.
// Returns per-iteration (energy, elapsed-time) traces for benchmarking.
std::pair<std::vector<T>, std::vector<T>> run()
{
//first initialize the structure
this->initialize();
if (this->parameter.verbose > 0)
{
std::cout << "Graph " << boost::num_vertices(this->main_graph) << " vertices and "
<< boost::num_edges(this->main_graph) << " edges and observation of dimension "
<< this->dim << '\n';
}
T energy_zero = this->compute_energy().first; //energy with 1 component
T old_energy = energy_zero; //energy at the previous iteration
//vector with time and energy, useful for benchmarking
//NOTE(review): constructed with max_ite_main default elements and then
//push_back'ed below, so the traces begin with zero padding — presumably
//reserve() was intended; confirm before relying on the output length.
std::vector<T> energy_out(this->parameter.max_ite_main ),time_out(this->parameter.max_ite_main);
TimeStack ts; ts.tic();
//the main loop
for (uint32_t ite_main = 1; ite_main <= this->parameter.max_ite_main; ite_main++)
{
//--------those two lines are the whole iteration-------------------------
uint32_t saturation = this->split(); //compute optimal binary partition
this->reduce(); //compute the new reduced graph
//-------end of the iteration - rest is stopping check and display------
std::pair<T,T> energy = this->compute_energy();
energy_out.push_back((energy.first + energy.second));
time_out.push_back(ts.tocDouble());
if (this->parameter.verbose > 1)
{
printf("Iteration %3i - %4i components - ", ite_main, (int)this->components.size());
printf("Saturation %5.1f %% - ",100*saturation / (double) this->nVertex);
switch (this->parameter.fidelity)
{
case L2:
{
printf("Quadratic Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
case linear:
{
printf("Linear Energy %10.1f - ", energy.first + energy.second);
break;
}
case KL:
{
printf("KL Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
case SPG:
{
printf("Quadratic Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
}
std::cout << "Timer " << ts.toc() << std::endl;
}
//----stopping checks-----
//NOTE(review): uint32_t compared against a double cast of nVertex — exact
//equality only holds when every vertex is saturated; confirm intent.
if (saturation == (double) this->nVertex)
{ //all components are saturated
if (this->parameter.verbose > 1)
{
std::cout << "All components are saturated" << std::endl;
}
break;
}
if ((old_energy - energy.first - energy.second) / (old_energy)
< this->parameter.stopping_ratio)
{ //relative energy progress stopping criterion
if (this->parameter.verbose > 1)
{
std::cout << "Stopping criterion reached" << std::endl;
}
break;
}
if (ite_main>=this->parameter.max_ite_main)
{ //max number of iteration
if (this->parameter.verbose > 1)
{
std::cout << "Max number of iteration reached" << std::endl;
}
break;
}
old_energy = energy.first + energy.second;
}
if (this->parameter.cutoff > 0)
{
this->cutoff(); //merge components smaller than parameter.cutoff
}
return std::pair<std::vector<T>, std::vector<T>>(energy_out, time_out);
}
//=============================================================================================
//=========== VIRTUAL METHODS DEPENDING ON THE CHOICE OF FIDELITY FUNCTION =====================
//=============================================================================================
//
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
virtual uint32_t split()
{
//compute the optimal binary partition
return 0;
}
//=============================================================================================
//================================ compute_energy_L2 ====================================
//=============================================================================================
virtual std::pair<T,T> compute_energy()
{
//compute the current energy
return std::pair<T,T>(0,0);
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com)
{
//default stub: derived fidelity classes compute the optimal value and total
//weight associated with component ind_com; the base implementation returns
//an empty value vector and a zero weight
std::vector<T> component_value(0);
return std::pair<std::vector<T>, T>(component_value, 0);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
, const VertexDescriptor<T> & comp2)
{
//default stub: derived fidelity classes compute the value and the energy gain
//obtained by merging the two connected components comp1 and comp2; the base
//implementation reports an empty merged value and a zero gain
std::vector<T> merged_value(0);
return std::pair<std::vector<T>, T>(merged_value, 0);
}
//=============================================================================================
//========================== END OF VIRTUAL METHODS ===========================================
//=============================================================================================
//
//=============================================================================================
//============================= INITIALIZE ===========================================
//=============================================================================================
void initialize()
{
//Build the initial reduced graph made of a single component holding every
//vertex of the main graph, then augment the main graph with the flow source
//and sink nodes linked to each vertex by zero-capacity double edges.
VertexIterator<T> ite_ver, ite_ver_end;
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
this->components[0] = std::vector<VertexDescriptor<T>> (0);//(this->nVertex);
this->root_vertex[0] = *boost::vertices(this->main_graph).first;
this->nVertex = boost::num_vertices(this->main_graph);
this->nEdge = boost::num_edges(this->main_graph);
//--------compute the first reduced graph----------------------------------------------------------
//a single component containing every vertex of the main graph
for (boost::tie(ite_ver, ite_ver_end) = boost::vertices(this->main_graph);
ite_ver != ite_ver_end; ++ite_ver)
{
this->components[0].push_back(*ite_ver);
}
//remember where the "real" vertices end: source and sink are appended after
this->lastIterator = ite_ver;
this->compute_value(0);
//--------build the link to source and sink--------------------------------------------------------
this->source = boost::add_vertex(this->main_graph);
this->sink = boost::add_vertex(this->main_graph);
//eIndex numbers the new edges after the existing ones; each addDoubledge
//call inserts an edge and its reverse, hence the += 2 increments below
uint32_t eIndex = boost::num_edges(this->main_graph);
ite_ver = boost::vertices(this->main_graph).first;
for (uint32_t ind_ver = 0; ind_ver < this->nVertex ; ind_ver++)
{
// note that source and sink will have many neighbors, and hence boost::edge should never be called to get
// the in_edge. use the out_edge and then reverse_Edge
addDoubledge<T>(this->main_graph, this->source, boost::vertex(ind_ver, this->main_graph), 0.,
eIndex, edge_attribute_map , false);
eIndex +=2;
addDoubledge<T>(this->main_graph, boost::vertex(ind_ver, this->main_graph), this->sink, 0.,
eIndex, edge_attribute_map, false);
eIndex +=2;
++ite_ver;
}
}
//=============================================================================================
//================================ COMPUTE_REDUCE_VALUE ====================================
//=============================================================================================
void compute_reduced_value()
{ //refresh the reduced value of every connected component
const uint32_t n_comp = this->components.size();
for (uint32_t i_com = 0; i_com < n_comp; ++i_com)
{
compute_value(i_com);
}
}
//=============================================================================================
//============================= ACTIVATE_EDGES ==========================================
//=============================================================================================
uint32_t activate_edges(bool allows_saturation = true)
{ //analyze the optimal binary partition computed by the max-flow to detect:
//- saturated components (i.e. uncuttable: the cut left them whole)
//- newly activated edges (edges crossing the source/sink interface)
//returns the number of vertices belonging to saturated components
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//saturation is the number of nodes in saturated components
uint32_t saturation = 0;
uint32_t nb_comp = this->components.size();
//---- first check if the components are saturated-------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
if (this->saturated_components[ind_com])
{ //ind_com is already saturated: count its vertices and move on
saturation += this->components[ind_com].size();
continue;
}
//totalWeight[0]: weight on the sink side; totalWeight[1]: source side
std::vector<T> totalWeight(2,0);
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ind_ver++)
{
bool isSink
= (vertex_attribute_map(this->components[ind_com][ind_ver]).color
== vertex_attribute_map(this->sink).color);
if (isSink)
{
totalWeight[0] += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
else
{
totalWeight[1] += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
}
if (allows_saturation && ((totalWeight[0] == 0)||(totalWeight[1] == 0)))
{
//the cut did not split this component: it is saturated
this->saturateComponent(ind_com);
saturation += this->components[ind_com].size();
}
}
//----check which edges have been activated----
EdgeIterator<T> ite_edg, ite_edg_end;
uint32_t color_v1, color_v2, color_combination;
for (boost::tie(ite_edg, ite_edg_end) = boost::edges(this->main_graph);
ite_edg != ite_edg_end; ++ite_edg)
{
if (!edge_attribute_map(*ite_edg).realEdge )
{ //skip edges involving the source or sink nodes
continue;
}
color_v1 = vertex_attribute_map(boost::source(*ite_edg, this->main_graph)).color;
color_v2 = vertex_attribute_map(boost::target(*ite_edg, this->main_graph)).color;
//color_source = 0, color_sink = 4, uncolored = 1
//an edge lies on the source/sink interface when its endpoint colors sum
//to 4 (source-sink) or 5 (sink-uncolored); uncolored nodes are
//arbitrarily assigned to the source side, so same-side sums are
//0 (source-source), 1 (source-uncolored), 2 (uncolored-uncolored)
//and 8 (sink-sink)
color_combination = color_v1 + color_v2;
//BUG FIX: the original test checked (color_combination == 2) twice and
//never skipped source-uncolored pairs (sum 1), wrongly activating them
if ((color_combination == 0)||(color_combination == 1)||(color_combination == 2)
||(color_combination == 8))
{ //edge between two vertices on the same side of the cut
continue;
}
//the edge is active!
edge_attribute_map(*ite_edg).isActive = true;
edge_attribute_map(*ite_edg).capacity = 0;
vertex_attribute_map(boost::source(*ite_edg, this->main_graph)).isBorder = true;
vertex_attribute_map(boost::target(*ite_edg, this->main_graph)).isBorder = true;
}
return saturation;
}
//=============================================================================================
//============================= REDUCE ===========================================
//=============================================================================================
void reduce()
{ //build the reduced problem after a cut, optionally with a backward check
this->compute_connected_components();
if (!this->parameter.backward_step)
{ //only the value associated to each connected component is needed
this->compute_reduced_value();
return;
}
//the full structure of the reduced graph is required for the merge step
this->compute_reduced_graph();
//check for beneficial merges between adjacent components
this->merge(false);
}
//=============================================================================================
//============================== compute_connected_components=========================================
//=============================================================================================
void compute_connected_components()
{ //compute the connected components of the graph with active edges removed
//boolean vectors indicate whether the edges and vertices have been seen already
//the root is the first vertex of a component
//this function is written such that the new components are appended at the end of components
//this allows not to recompute saturated components
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map =get(boost::vertex_index, this->main_graph);
//indicate which edges and nodes have been seen already by the depth search
std::vector<bool> edges_seen (this->nEdge, false);
//+2 accounts for the source and sink vertices appended by initialize()
std::vector<bool> vertices_seen (this->nVertex+2, false);
//source and sink are flagged seen so no component can absorb them
vertices_seen[vertex_index_map(this->source)] = true;
vertices_seen[vertex_index_map(this->sink)] = true;
//-------- start with the known roots------------------------------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < this->root_vertex.size(); ind_com++)
{
VertexDescriptor<T> root = this->root_vertex[ind_com]; //the first vertex of the component
if (this->saturated_components[ind_com])
{ //this component is saturated, we don't need to recompute it
//but its vertices must still be flagged as seen
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
vertices_seen[vertex_index_map(this->components[ind_com][ind_ver])] = true;
}
}
else
{ //recompute the content of this component by depth search from its root
this->components.at(ind_com) = connected_comp_from_root(root, this->components.at(ind_com).size()
, vertices_seen , edges_seen);
}
}
//----now look for components that did not already exist----
//lastIterator bounds the real vertices, excluding source and sink
VertexIterator<T> ite_ver;
for (ite_ver = boost::vertices(this->main_graph).first;
ite_ver != this->lastIterator; ++ite_ver)
{
if (vertices_seen[vertex_index_map(*ite_ver)])
{
continue;
} //this vertex is not currently in a connected component
VertexDescriptor<T> root = *ite_ver; //we define it as the root of a new component
//size of the component this vertex used to belong to, used as a
//capacity hint for the new component's vector
uint32_t current_component_size =
this->components[vertex_attribute_map(root).in_component].size();
this->components.push_back(
connected_comp_from_root(root, current_component_size
, vertices_seen, edges_seen));
this->root_vertex.push_back(root);
this->saturated_components.push_back(false);
}
this->components.shrink_to_fit();
}
//=============================================================================================
//============================== CONNECTED_COMP_FROM_ROOT=========================================
//=============================================================================================
inline std::vector<VertexDescriptor<T>> connected_comp_from_root(const VertexDescriptor<T> & root
, const uint32_t & size_comp, std::vector<bool> & vertices_seen , std::vector<bool> & edges_seen)
{
//compute the connected component (of the graph with active edges removed)
//containing ROOT, by performing a depth-first search
//size_comp is only a capacity hint for the result vector;
//vertices_seen and edges_seen are shared across calls and updated in place
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = get(boost::vertex_index, this->main_graph);
EdgeIndexMap<T> edge_index_map = get(&EdgeAttribute<T>::index, this->main_graph);
std::vector<VertexDescriptor<T>> vertices_added; //the vertices in the current connected component
// vertices_added contains the vertices that have been added to the current component
vertices_added.reserve(size_comp);
//vertices_to_add is the DFS stack: vertices still to be added to the component
std::vector<VertexDescriptor<T>> vertices_to_add;
vertices_to_add.reserve(size_comp);
VertexDescriptor<T> vertex_current; //the node being considered
EdgeDescriptor edge_current, edge_reverse; //the edge being considered
//seed the stack with the root node
vertices_to_add.push_back(root);
while (vertices_to_add.size()>0)
{ //as long as there are vertices left to add
vertex_current = vertices_to_add.back(); //the current node is the last node to add
vertices_to_add.pop_back(); //remove the current node from the vertices to add
if (vertices_seen[vertex_index_map(vertex_current)])
{ //this vertex has already been treated (it may appear several times
//in the stack since the seen flag is only set when it is popped)
continue;
}
vertices_added.push_back(vertex_current); //we add the current node
vertices_seen[vertex_index_map(vertex_current)] = true ; //and flag it as seen
//----we now explore the neighbors of current_node
typename boost::graph_traits<Graph<T>>::out_edge_iterator ite_edg, ite_edg_end;
for (boost::tie(ite_edg,ite_edg_end) = boost::out_edges(vertex_current, this->main_graph);
ite_edg != ite_edg_end; ++ite_edg)
{ //explore edges leaving current_node
edge_current = *ite_edg;
if (edge_attribute_map(*ite_edg).isActive || (edges_seen[edge_index_map(edge_current)]))
{ //edge is either active or treated, we skip it
continue;
}
//the target of this edge is a node to add;
//flag both directions of the double edge as seen
edge_reverse = edge_attribute_map(edge_current).edge_reverse;
edges_seen[edge_index_map(edge_current)] = true;
edges_seen[edge_index_map(edge_reverse)] = true;
vertices_to_add.push_back(boost::target(edge_current, this->main_graph));
}
}
vertices_added.shrink_to_fit();
return vertices_added;
}
//=============================================================================================
//================================ COMPUTE_REDUCE_GRAPH ====================================
//=============================================================================================
void compute_reduced_graph()
{ //compute the adjacency structure between components as well as the weight and value of each component
//this is stored in the reduced graph structure: one reduced vertex per
//component, one reduced edge (border-edge) per pair of adjacent components
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
this->reduced_graph = Graph<T>(this->components.size());
VertexAttributeMap<T> component_attribute_map = boost::get(boost::vertex_bundle, this->reduced_graph);
//----fill the values of the reduced graph----
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
{
std::pair<std::vector<T>, T> component_values_and_weight = this->compute_value(ind_com);
//----fill the value and weight field of the reduced graph-----------------------------
VertexDescriptor<T> reduced_vertex = boost::vertex(ind_com, this->reduced_graph);
component_attribute_map[reduced_vertex] = VertexAttribute<T>(this->dim);
component_attribute_map(reduced_vertex).weight
= component_values_and_weight.second;
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
component_attribute_map(reduced_vertex).value[i_dim]
= component_values_and_weight.first[i_dim];
}
}
//------compute the edges of the reduced graph
EdgeAttributeMap<T> border_edge_attribute_map = boost::get(boost::edge_bundle, this->reduced_graph);
//borders[i] lists the main-graph edges making up border-edge i
this->borders.clear();
EdgeDescriptor edge_current, border_edge_current;
uint32_t ind_border_edge = 0, comp1, comp2, component_source, component_target;
VertexDescriptor<T> source_component, target_component;
bool reducedEdgeExists;
typename boost::graph_traits<Graph<T>>::edge_iterator ite_edg, ite_edg_end;
for (boost::tie(ite_edg,ite_edg_end) = boost::edges(this->main_graph); ite_edg != ite_edg_end; ++ite_edg)
{
if (!edge_attribute_map(*ite_edg).realEdge)
{ //edges linking the source or sink node do not take part
continue;
}
edge_current = *ite_edg;
//compute the connected components of the source and target of current_edge
comp1 = vertex_attribute_map(boost::source(edge_current, this->main_graph)).in_component;
comp2 = vertex_attribute_map(boost::target(edge_current, this->main_graph)).in_component;
if (comp1==comp2)
{ //this edge links two nodes in the same connected component
continue;
}
//by convention we note component_source the smallest index and
//component_target the largest
component_source = std::min(comp1,comp2);
component_target = std::max(comp1,comp2);
//retrieve the corresponding vertices in the reduced graph
source_component = boost::vertex(component_source, this->reduced_graph);
target_component = boost::vertex(component_target, this->reduced_graph);
//check whether a border-edge linking those components already exists
boost::tie(border_edge_current, reducedEdgeExists)
= boost::edge(source_component, target_component, this->reduced_graph);
if (!reducedEdgeExists)
{ //this border-edge did not already exist in the reduced graph
//border_edge_current = boost::add_edge(source_component, target_component, this->reduced_graph).first;
border_edge_current = boost::add_edge(source_component, target_component, this->reduced_graph).first;
border_edge_attribute_map(border_edge_current).index = ind_border_edge;
border_edge_attribute_map(border_edge_current).weight = 0;
ind_border_edge++;
//create a new entry for the borders list containing this border
this->borders.push_back(std::vector<EdgeDescriptor>(0));
}
//add the weight of the current edge to the weight of the border-edge
//(0.5 because each main-graph edge exists in both directions)
border_edge_attribute_map(border_edge_current).weight += 0.5*edge_attribute_map(edge_current).weight;
this->borders[border_edge_attribute_map(border_edge_current).index].push_back(edge_current);
}
}
//=============================================================================================
//================================ MERGE ====================================
//=============================================================================================
uint32_t merge(bool is_cutoff)
{
//Backward step: check whether the energy can be decreased by merging pairs of
//adjacent components (removing border-edges from the reduced graph).
//When is_cutoff is true, merges are forced for components below the cutoff
//weight regardless of the sign of the gain. Returns the number of merges done.
// TODO: right now we only do one loop through the heap of potential merging, and only
//authorize one merging per component. We could update the gain and merge until it is no longer
//beneficial
//----load graph structure---
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexAttributeMap<T> component_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
EdgeAttributeMap<T> border_edge_attribute_map
= boost::get(boost::edge_bundle, this->reduced_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexIndexMap<T> component_index_map = boost::get(boost::vertex_index, this->reduced_graph);
//-----------------------------------
EdgeDescriptor border_edge_current;
typename boost::graph_traits<Graph<T>>::edge_iterator ite_border, ite_border_end;
typename std::vector<EdgeDescriptor>::iterator ite_border_edge;
VertexDescriptor<T> source_component, target_component;
uint32_t ind_source_component, ind_target_component, border_edge_currentIndex;
//gain_current is the vector of gains associated with each merging move
//std::vector<T> gain_current(boost::num_edges(this->reduced_graph));
//we store in merge_queue the potential mergings with a priority on the potential gain
std::priority_queue<ComponentsFusion<T>, std::vector<ComponentsFusion<T>>, lessComponentsFusion<T>> merge_queue;
T gain; // the gain obtained by removing the border corresponding to the edge in the reduced graph
for (boost::tie(ite_border,ite_border_end) = boost::edges(this->reduced_graph); ite_border != ite_border_end; ++ite_border)
{
//a first pass goes through all the edges in the reduced graph and computes the gain obtained by
//merging the corresponding vertices
border_edge_current = *ite_border;
border_edge_currentIndex = border_edge_attribute_map(border_edge_current).index;
//retrieve the two components corresponding to this border
source_component = boost::source(border_edge_current, this->reduced_graph);
target_component = boost::target(border_edge_current, this->reduced_graph);
if (is_cutoff && component_attribute_map(source_component).weight > this->parameter.cutoff
&&component_attribute_map(target_component).weight > this->parameter.cutoff)
{ //in cutoff mode only pairs involving a small component are considered
continue;
}
ind_source_component = component_index_map(source_component);
ind_target_component = component_index_map(target_component);
//----now compute the gain of merging those two components-----
// compute the fidelity lost by merging the two connected components
std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(source_component, target_component);
// the second part is the penalty gained by removing the border
gain = merge_gain.second
+ border_edge_attribute_map(border_edge_current).weight * this->parameter.reg_strenth;
//merging_information stores the indexes of the components as well as the edge index and the gain
//in a structure ordered by the gain
ComponentsFusion<T> mergeing_information(ind_source_component, ind_target_component, border_edge_currentIndex, gain);
mergeing_information.merged_value = merge_gain.first;
if (is_cutoff || gain>0)
{ //it is beneficial (or forced, in cutoff mode) to merge those two components
//we add them to the merge_queue
merge_queue.push(mergeing_information);
//gain_current.at(border_edge_currentIndex) = gain;
}
}
uint32_t n_merged = 0;
//----go through the priority queue of merges and perform them as long as it is beneficial---
//is_merged indicates which components have already taken part in a merge this pass
std::vector<bool> is_merged(this->components.size(), false);
//to_destroy indicates the components that need to be removed (absorbed into another)
std::vector<bool> to_destroy(this->components.size(), false);
while(merge_queue.size()>0)
{ //loop through the potential mergings and accept the ones that decrease the energy
ComponentsFusion<T> mergeing_information = merge_queue.top();
if (!is_cutoff && mergeing_information.merge_gain<=0)
{ //no more merging provides a gain in energy; the queue is sorted so we can stop
break;
}
merge_queue.pop();
if (is_merged.at(mergeing_information.comp1) || (is_merged.at(mergeing_information.comp2)))
{
//at least one of the components has already been merged this pass:
//its cached gain is stale, skip (see TODO above)
continue;
}
n_merged++;
//---proceed with the fusion of comp1 and comp2----
//add the vertices of comp2 to comp1
this->components[mergeing_information.comp1].insert(this->components[mergeing_information.comp1].end()
,components[mergeing_information.comp2].begin(), this->components[mergeing_information.comp2].end());
//if comp1 was saturated it might not be anymore
this->saturated_components[mergeing_information.comp1] = false;
//the new weight is the sum of both weights
component_attribute_map(mergeing_information.comp1).weight
+= component_attribute_map(mergeing_information.comp2).weight;
//the new value is already computed in mergeing_information
component_attribute_map(mergeing_information.comp1).value = mergeing_information.merged_value;
//we deactivate the border between comp1 and comp2 (in the main graph)
for (ite_border_edge = this->borders.at(mergeing_information.border_index).begin();
ite_border_edge != this->borders.at(mergeing_information.border_index).end() ; ++ite_border_edge)
{
edge_attribute_map(*ite_border_edge).isActive = false;
}
is_merged.at(mergeing_information.comp1) = true;
is_merged.at(mergeing_information.comp2) = true;
to_destroy.at(mergeing_information.comp2) = true;
}
//we now rebuild the vectors components, root_vertex and saturated_components,
//dropping absorbed components and renumbering the survivors
std::vector<std::vector<VertexDescriptor<T>>> new_components;
std::vector<VertexDescriptor<T>> new_root_vertex;
std::vector<bool> new_saturated_components;
uint32_t ind_new_component = 0;
for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
{
if (to_destroy.at(ind_com))
{ //this component has been removed
continue;
}//this component is kept
new_components.push_back(this->components.at(ind_com));
new_root_vertex.push_back(this->root_vertex.at(ind_com));
new_saturated_components.push_back(saturated_components.at(ind_com));
//if (is_merged.at(ind_com))
//{ //we need to update the value of the vertices in this component
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
vertex_attribute_map(this->components[ind_com][ind_ver]).value
= component_attribute_map(boost::vertex(ind_com, this->reduced_graph)).value;
vertex_attribute_map(this->components[ind_com][ind_ver]).in_component
= ind_new_component;//ind_com;
}
//}
ind_new_component++;
}
this->components = new_components;
this->root_vertex = new_root_vertex;
this->saturated_components = new_saturated_components;
return n_merged;
}
//=============================================================================================
//================================ CUTOFF ====================================
//=============================================================================================
void cutoff()
{ //repeatedly force-merge components smaller than the cutoff weight until no
//merge is performed or a hard cap of passes is reached
uint32_t n_merged;
int n_pass = 0;
do
{
//this->compute_connected_components();
this->compute_reduced_graph();
n_merged = merge(true);
++n_pass;
} while (n_merged != 0 && n_pass <= 50);
}
// //=============================================================================================
// //================================ CUTOFF ====================================
// //=============================================================================================
// void cutoff()
// {
// // Loop through all components and merge the one smaller than the cutoff.
// // It merges components which increase he energy the least
// //----load graph structure---
// VertexAttributeMap<T> vertex_attribute_map
// = boost::get(boost::vertex_bundle, this->main_graph);
// VertexAttributeMap<T> reduced_vertex_attribute_map
// = boost::get(boost::vertex_bundle, this->reduced_graph);
// EdgeAttributeMap<T> reduced_edge_attribute_map
// = boost::get(boost::edge_bundle, this->reduced_graph);
// EdgeAttributeMap<T> edge_attribute_map
// = boost::get(boost::edge_bundle, this->main_graph);
// VertexIndexMap<T> reduced_vertex_index_map = boost::get(boost::vertex_index, this->reduced_graph);
// EdgeIndexMap<T> reduced_edge_index_map = get(&EdgeAttribute<T>::index, this->reduced_graph);
// //-----------------------------------
// typename boost::graph_traits<Graph<T>>::vertex_iterator ite_comp, ite_comp_end;
// typename boost::graph_traits<Graph<T>>::out_edge_iterator ite_edg_out, ite_edg_out_end;
// typename boost::graph_traits<Graph<T>>::in_edge_iterator ite_edg_in, ite_edg_in_end;
// typename std::vector<EdgeDescriptor>::iterator ite_border_edge;
// VertexDescriptor<T> current_vertex, neighbor_vertex;
// //gain_current is the vector of gains associated with each mergeing move
// //we store in merge_queue the potential mergeing with a priority on the potential gain
// T gain; // the gain obtained by removing the border corresponding to the edge in the reduced graph
// std::vector<bool> to_destroy(this->components.size(), false); //components merged to be removed
// while (true)
// {
// this->compute_connected_components();
// this->compute_reduced_graph();
// bool has_merged = false;
// std::cout << "CUTTING OFF : " << this->components.size() << "COMPONENTS " << std::endl;
// for (boost::tie(ite_comp,ite_comp_end) = boost::vertices(this->reduced_graph); ite_comp != ite_comp_end; ++ite_comp)
// {
// current_vertex = *ite_comp;
// if (reduced_vertex_attribute_map(current_vertex).weight > this->parameter.cutoff
// || to_destroy.at(reduced_vertex_index_map(current_vertex)))
// {//component big enough to not be cut or already removed
// continue;
// }
// std::priority_queue<ComponentsFusion<T>, std::vector<ComponentsFusion<T>>, lessComponentsFusion<T>> merge_queue;
//std::cout << "COMPONENT " << reduced_vertex_index_map(current_vertex) << " IS OF SIZE"<< reduced_vertex_attribute_map(current_vertex).weight << std::endl;
// for (boost::tie(ite_edg_out,ite_edg_out_end) = boost::out_edges(current_vertex, this->reduced_graph);
// ite_edg_out != ite_edg_out_end; ++ite_edg_out)
// { //explore all neighbors
// neighbor_vertex = boost::target(*ite_edg_out, this->reduced_graph);
// std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(current_vertex, neighbor_vertex);
// gain = merge_gain.second
// + reduced_edge_attribute_map(*ite_edg_out).weight * this->parameter.reg_strenth;
// ComponentsFusion<T> mergeing_information(reduced_vertex_index_map(current_vertex), reduced_vertex_index_map(neighbor_vertex)
// , reduced_edge_index_map(*ite_edg_out), gain);
// mergeing_information.merged_value = merge_gain.first;
// merge_queue.push(mergeing_information);
//std::cout << " NEI OUT " <<reduced_vertex_index_map(neighbor_vertex) << " GAIN"<< gain << std::endl;
// }
// for (boost::tie(ite_edg_in,ite_edg_in_end) = boost::in_edges(current_vertex, this->reduced_graph);
// ite_edg_in != ite_edg_in_end; ++ite_edg_in)
// { //explore all neighbors
// neighbor_vertex = boost::source(*ite_edg_in, this->reduced_graph);
// std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(current_vertex, neighbor_vertex);
// gain = merge_gain.second
// + reduced_edge_attribute_map(*ite_edg_in).weight * this->parameter.reg_strenth;
// ComponentsFusion<T> mergeing_information(reduced_vertex_index_map(current_vertex), reduced_vertex_index_map(neighbor_vertex)
// , reduced_edge_index_map(*ite_edg_in), gain);
// mergeing_information.merged_value = merge_gain.first;
// merge_queue.push(mergeing_information);
//std::cout << " NEI IN" <<reduced_vertex_index_map(neighbor_vertex) << " GAIN"<< gain << std::endl;
// }
// if (merge_queue.empty())
// {
// continue;
// }
// has_merged = true;
// //select the most advantegeous neighboring components and merge it.
// ComponentsFusion<T> mergeing_information = merge_queue.top();
//std::cout << "BEST NEIGHBORS = " << mergeing_information.comp1 << " - " << mergeing_information.comp2 << " = " << mergeing_information .merge_gain
// << " Weight " << reduced_vertex_attribute_map(mergeing_information.comp2).weight << std::endl;
// this->components[mergeing_information.comp1].insert(this->components[mergeing_information.comp1].end()
// ,components[mergeing_information.comp2].begin(), this->components[mergeing_information.comp2].end());
// //the new weight is the sum of both weights
// reduced_vertex_attribute_map(mergeing_information.comp1).weight
// += reduced_vertex_attribute_map(mergeing_information.comp2).weight;
// //the new value is already computed in mergeing_information
// reduced_vertex_attribute_map(mergeing_information.comp1).value = mergeing_information.merged_value;
// //we deactivate the border between comp1 and comp2
// for (ite_border_edge = this->borders.at(mergeing_information.border_index).begin();
// ite_border_edge != this->borders.at(mergeing_information.border_index).end() ; ++ite_border_edge)
// {
// edge_attribute_map(*ite_border_edge).isActive = false;
// }
// to_destroy.at(mergeing_information.comp2) = true;
//std::cout << "=> " << reduced_vertex_index_map(current_vertex) << " IS OF SIZE"<< reduced_vertex_attribute_map(current_vertex).weight << std::endl;
// }
// //if (!has_merged)
// //{
// break;
// //}
// }
// //we now rebuild the vectors components, rootComponents and saturated_components
// std::vector<std::vector<VertexDescriptor<T>>> new_components;
// uint32_t ind_new_component = 0;
// for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
// {
// if (to_destroy.at(ind_com))
// { //this component has been removed
// continue;
// }//this components is kept
// new_components.push_back(this->components.at(ind_com));
// //if (is_merged.at(ind_com))
// //{ //we need to update the value of the vertex in this component
// for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
// {
// vertex_attribute_map(this->components[ind_com][ind_ver]).value
// = reduced_vertex_attribute_map(boost::vertex(ind_com, this->reduced_graph)).value;
// vertex_attribute_map(this->components[ind_com][ind_ver]).in_component
// = ind_new_component;//ind_com;
// }
// //}
// ind_new_component++;
// }
// this->components = new_components;
// }
//===============================================================================================
//==========================saturateComponent====================================================
//===============================================================================================
inline void saturateComponent(const uint32_t & ind_com)
{ //this component is uncuttable and needs to be removed from further graph-cuts:
//flag it saturated and zero the capacities of its links to source and sink
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
this->saturated_components[ind_com] = true;
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
VertexDescriptor<T> desc_v = this->components[ind_com][i_ver];
// because of the adjacency structure NEVER access edge (source,v) directly!
// (source/sink have huge degree, making boost::edge lookups from them costly)
EdgeDescriptor edg_ver2source = boost::edge(desc_v, this->source,this->main_graph).first;
EdgeDescriptor edg_source2ver = edge_attribute_map(edg_ver2source).edge_reverse; //use edge_reverse instead
EdgeDescriptor edg_sink2ver = boost::edge(desc_v, this->sink,this->main_graph).first;
// we set the capacities of edges to source and sink to zero
edge_attribute_map(edg_source2ver).capacity = 0.;
edge_attribute_map(edg_sink2ver).capacity = 0.;
}
}
};
}
|
target-data.c | // RUN: %libomptarget-compile-generic -fopenmp-extensions
// RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace
#include <omp.h>
#include <stdio.h>
#define CHECK_PRESENCE(Var1, Var2, Var3) \
printf(" presence of %s, %s, %s: %d, %d, %d\n", \
#Var1, #Var2, #Var3, \
omp_target_is_present(&Var1, omp_get_default_device()), \
omp_target_is_present(&Var2, omp_get_default_device()), \
omp_target_is_present(&Var3, omp_get_default_device()))
// Exercises libomptarget reference counting for mapped variables: the dynamic
// reference count (enter/exit data) vs. the structured one (target data
// regions) vs. the LLVM ompx_hold extension, which pins a mapping for the
// lifetime of the holding region. Lines starting with a verifier directive
// (see the RUN lines at the top of the file) pin the expected stdout; do not
// edit them or the program statements they describe.
int main() {
int m, r, d;
// CHECK: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// -----------------------------------------------------------------------
// Section 1: only structured refcounts; mappings vanish when regions end.
// CHECK-NEXT: check:{{.*}}
printf("check: dyn>0, hold=0, dec/reset dyn=0\n");
// CHECK-NEXT: structured{{.*}}
printf(" structured dec of dyn\n");
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// exit data decrements (and "delete" resets) the dynamic refcount while the
// structured regions are still open.
// CHECK-NEXT: dynamic{{.*}}
printf(" dynamic dec/reset of dyn\n");
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// -----------------------------------------------------------------------
// Section 2: ompx_hold keeps mappings alive even when exit data would
// otherwise remove them.
// CHECK: check:{{.*}}
printf("check: dyn=0, hold>0, dec/reset dyn=0, dec hold=0\n");
// Structured dec of dyn would require dyn>0.
// CHECK-NEXT: dynamic{{.*}}
printf(" dynamic dec/reset of dyn\n");
#pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// -----------------------------------------------------------------------
// Section 3: both refcount kinds positive; neither reaches zero while the
// hold regions are open.
// CHECK: check:{{.*}}
printf("check: dyn>0, hold>0, dec/reset dyn=0, dec hold=0\n");
// CHECK-NEXT: structured{{.*}}
printf(" structured dec of dyn\n");
#pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// CHECK-NEXT: dynamic{{.*}}
printf(" dynamic dec/reset of dyn\n");
#pragma omp target enter data map(to: m) map(alloc: r, d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target enter data map(to: m) map(alloc: r, d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// -----------------------------------------------------------------------
// Section 4: hold regions close first, then dynamic decrements take effect.
// CHECK: check:{{.*}}
printf("check: dyn>0, hold>0, dec hold=0, dec/reset dyn=0\n");
// CHECK-NEXT: structured{{.*}}
printf(" structured dec of dyn\n");
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(tofrom: m) map(alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
// CHECK-NEXT: dynamic{{.*}}
printf(" dynamic dec/reset of dyn\n");
#pragma omp target enter data map(to: m) map(alloc: r, d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target enter data map(to: m) map(alloc: r, d)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target data map(ompx_hold, tofrom: m) \
 map(ompx_hold, alloc: r, d)
{
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
}
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r)
// CHECK-NEXT: presence of m, r, d: 1, 1, 1
CHECK_PRESENCE(m, r, d);
#pragma omp target exit data map(from: m) map(release: r) map(delete: d)
// CHECK-NEXT: presence of m, r, d: 0, 0, 0
CHECK_PRESENCE(m, r, d);
return 0;
}
|
DRB008-indirectaccess4-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have a distance of 12 (xa2 - xa1 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
The index set has two indices with distance of 12 :
indexSet[1]- indexSet[0] = 533 - 521 = 12
So xa1[idx] and xa2[idx] may cause loop carried dependence for N=0 and N=3.
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within a same chunk to a same thread. So there is no runtime data races.
N is 180, two iterations with N=0 and N=1 have loop carried dependences.
For static even scheduling, we must have at least 180 threads (180/180=1 iterations)
so iteration 0 and 1 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
// Index set driving the indirect accesses in main(). The only pair whose
// difference equals the xa2-xa1 pointer offset of 12 is
// indexSet[1] - indexSet[0] = 533 - 521 = 12, so iterations 0 and 1 are the
// ones that touch the same memory location through the two aliased pointers.
int indexSet[N] = {
521, 533, 525, 527, 529, 531, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
// DataRaceBench DRB008 (indirect access, race-present variant): two pointers
// offset by 12 doubles alias the same base array; the parallel update loop
// below therefore carries the documented race between xa1[idx] and xa2[idx]
// for indexSet[0]=521 and indexSet[1]=533 (533 = 521 + 12).
int main (int argc, char* argv[])
{
  // One buffer big enough for the largest index (2013) plus the pointer
  // offset (12), with one extra slot.
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0)
  {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }
  double * xa1 = base;
  double * xa2 = xa1 + 12; // xa2 - xa1 == 12: both alias 'base'
  int i;
  // initialize segments touched by indexSet
#pragma omp parallel for
  for (i =521; i<= 2025; ++i)
  {
    base[i]=0.5*i;
  }
  // Fix: this loop must be a parallel worksharing loop for the data race
  // documented in the file header ("Data race pair: xa1[idx] vs. xa2[idx]")
  // to exist; the pragma was missing here although the header describes the
  // loop's OpenMP scheduling in detail.
#pragma omp parallel for
  for (i =0; i< N; ++i)
  {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;
    xa2[idx]+= 3.0;
  }
  printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
|
progress.c | /*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdint.h>
#include <omp.h>
#define GANG_SCHEDULING
#define MEASURE_BARRIER
#define PERIOD 2500000000UL
#define ITERATIONS 10
#define STACK_SIZE (64 * 1024)
// Reads the x86 time-stamp counter (non-serializing, so it may be reordered
// around neighboring instructions). RDTSC writes EAX/EDX; with 64-bit
// operands the "=a"/"=d" outputs get the zero-extended halves, which the
// shift/or below reassembles into the full 64-bit counter.
// NOTE(review): x86-specific; will not build on other architectures.
static inline uint64_t rdtsc(void)
{
uint64_t eax, edx;
__asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
return (edx << 32) | eax;
}
// Benchmark driver: measures per-thread progress (and, with MEASURE_BARRIER,
// per-iteration barrier latency) of an OpenMP team on the Barrelfish BOMP
// runtime. The #ifdef structure interleaves with the brace structure, so the
// code is left untouched and only annotated.
int main(int argc, char *argv[])
{
int nthreads;
if(argc == 2) {
nthreads = atoi(argv[1]);
// Barrelfish/BOMP-specific runtime setup: span a domain of 14 cores with
// the given stack size, then initialize the custom OpenMP backend.
// NOTE(review): not portable to standard OpenMP runtimes.
backend_span_domain(14, STACK_SIZE);
bomp_custom_init(NULL);
omp_set_num_threads(nthreads);
} else {
assert(!"Specify number of threads");
}
// One progress counter per thread (up to 32), read by the master printer.
volatile uint64_t workcnt[32] = { 0 };
uint64_t last = rdtsc();
#ifndef CPU_BOUND
// exittime[iter] holds the inner-loop index at which iteration 'iter'
// should terminate; 0 means "not yet scheduled to exit".
volatile uint64_t exittime[ITERATIONS] = { 0 };
#endif
// Outer loop cycles forever through ITERATIONS measurement rounds.
for(int iter = 0;; iter = (iter + 1) % ITERATIONS) {
#ifdef CPU_BOUND
volatile bool exitnow = false;
#else
#ifdef MEASURE_BARRIER
# define MAXTHREADS 16
# define WORKMAX 5000000
// Per-thread, per-inner-iteration barrier entry-to-exit cycle counts.
static uint64_t starta[MAXTHREADS][WORKMAX];
#endif
#endif
#ifdef GANG_SCHEDULING
// Align all threads before starting the measured region.
#pragma omp parallel
{
bomp_synchronize();
}
#endif
// Do some work
#pragma omp parallel
for(uint64_t i = 0;; i++) {
#ifndef CPU_BOUND
# ifdef MEASURE_BARRIER
uint64_t lasta = rdtsc();
# endif
# pragma omp barrier
# ifdef MEASURE_BARRIER
if(i < WORKMAX) {
// Record how many cycles this thread spent in the barrier.
starta[omp_get_thread_num()][i] = rdtsc() - lasta;
}
# endif
#endif
workcnt[omp_get_thread_num()]++;
// Master thread periodically prints everyone's progress counters.
#pragma omp master
if(rdtsc() >= last + PERIOD) {
printf("%lu: threads %d (%s), progress ", rdtsc(), nthreads, "static");
for(int n = 0; n < 32; n++) {
printf("%lu ", workcnt[n]);
}
printf("\n");
last += PERIOD;
#ifndef CPU_BOUND
// Schedule this round to end 3 inner iterations from now, and clear
// the marker two rounds back so the slot can be reused.
if(exittime[iter] == 0) {
exittime[iter] = i + 3;
exittime[(iter + ITERATIONS - 2) % ITERATIONS] = 0;
}
}
if(exittime[iter] != 0 && exittime[iter] == i) {
break;
}
#else
exitnow = true;
}
if(exitnow) {
break;
}
#endif
}
#ifndef CPU_BOUND
// Histogram of barrier latencies by decimal order of magnitude.
static uint64_t hgram[15] = { 0 };
printf("exittime = %lu\n", exittime[iter]);
assert(exittime[iter] <= WORKMAX);
uint64_t endtime = exittime[iter] < WORKMAX ? exittime[iter] : WORKMAX;
for(int i = 0; i < endtime; i++) {
for(int n = 0; n < nthreads; n++) {
uint64_t val = starta[n][i];
for(int j = 0; j < 15; j++) {
val /= 10;
if(val == 0) {
hgram[j]++;
break;
}
}
}
}
uint64_t val = 1;
for(int i = 0; i < 15; i++) {
val *= 10;
printf("%lu\t%lu\n", val, hgram[i]);
}
#endif
}
}
|
GB_binop__bclr_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_int16
// A.*B function (eWiseMult): GB_AemultB__bclr_int16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_int16
// C+=b function (dense accum): GB_Cdense_accumb__bclr_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int16
// C=scalar+B GB_bind1st__bclr_int16
// C=scalar+B' GB_bind1st_tran__bclr_int16
// C=A+scalar GB_bind2nd__bclr_int16
// C=A'+scalar GB_bind2nd_tran__bclr_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int16_t, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITCLR (x, y, int16_t, 16) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT16 || GxB_NO_BCLR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, no accumulator. The loop body is
// supplied by the included template, specialized via the GB_* macros above.
GrB_Info GB_Cdense_ewise3_noaccum__bclr_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out by GxB_NO_* flags; caller uses the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// BITCLR operator. The kfirst/klast/pstart slice arrays partition B's
// entries across ntasks parallel tasks; the loop lives in the template.
GrB_Info GB_Cdense_accumB__bclr_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into every
// entry of the dense matrix C, using the BITCLR operator.
// Fix: removed the second, unreachable "return (GrB_SUCCESS) ;" that followed
// the inner block — that block already returns unconditionally.
GrB_Info GB_Cdense_accumb__bclr_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B — result pattern is the union of A and B.
// The C_to_* arrays map C's vectors back to M/A/B; TaskList drives the
// parallel schedule. The merge logic is in the included template.
GrB_Info GB_AaddB__bclr_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B — result pattern is the intersection of
// A and B. Same task-slicing scheme as eWiseAdd; logic in the template.
GrB_Info GB_AemultB__bclr_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply GB_BITCLR with the scalar x bound as the first
// operand, over all anz entries of Bx. Buffers arrive type-erased and are
// reinterpreted as int16_t here.
GrB_Info GB_bind1st__bclr_int16
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *out = (int16_t *) Cx_output ;
int16_t *in = (int16_t *) Bx_input ;
int16_t scalar = (*((int16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
out [k] = GB_BITCLR (scalar, in [k], int16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply GB_BITCLR with the scalar y bound as the second
// operand, over all anz entries of Ax. Buffers arrive type-erased and are
// reinterpreted as int16_t here.
GrB_Info GB_bind2nd__bclr_int16
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *out = (int16_t *) Cx_output ;
int16_t *in = (int16_t *) Ax_input ;
int16_t scalar = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
out [k] = GB_BITCLR (in [k], scalar, int16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: bind the scalar x as the
// first operand of GB_BITCLR.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, int16_t, 16) ; \
}
// C = op (x, A'): transpose A while applying GB_BITCLR(x, aij). The scalar
// arrives type-erased in x_input; the transpose machinery (Rowcounts, Iter,
// A_slice, naslice) is consumed by the included template.
GrB_Info GB_bind1st_tran__bclr_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: bind the scalar y as the
// second operand of GB_BITCLR.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, int16_t, 16) ; \
}
// C = op (A', y): transpose A while applying GB_BITCLR(aij, y). The scalar
// arrives type-erased in y_input; the loop lives in the included template.
GrB_Info GB_bind2nd_tran__bclr_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rmsprop_op.h | #pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
// One RMSProp step over N coefficients (CPU path; the trailing Context
// parameter is unused here and exists to match the device specialization):
//   nms  = ms + (1 - decay) * (g^2 - ms)   // new mean-square estimate
//   nmom = mom * momentum + lr[0] * g / sqrt(epsilon + nms)
//   ng   = nmom                            // output gradient = momentum
// g/ms/mom are read-only inputs; ng/nms/nmom are the outputs; lr points to a
// single learning-rate scalar. Output buffers may alias the inputs only
// elementwise (each index is read before written).
template <typename Context>
void rmsprop_update(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr,
Context* context) {
#pragma omp parallel for
for (auto i = 0; i < N; ++i) {
// Update new mean square estimate
nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]);
// Update momentum estimate
nmom[i] = mom[i] * momentum + lr[0] * g[i] / std::sqrt(epsilon + nms[i]);
// New gradient is the momentum
ng[i] = nmom[i];
}
}
// RMSProp optimizer operator (CPU). Inputs: GRAD, MEAN_SQUARES, MOMENTUM, LR
// (a single scalar); outputs: updated gradient, mean-squares, and momentum.
// Hyperparameters are read from operator arguments in the constructor.
template <typename T, class Context>
class RmsPropOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  RmsPropOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        decay_(OperatorBase::GetSingleArgument<float>("decay", 0.9)),
        momentum_(OperatorBase::GetSingleArgument<float>("momentum", 0.0)),
        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)) {}
  bool RunOnDevice() override {
    CAFFE_ENFORCE(Input(LR).size() == 1);
    CAFFE_ENFORCE(Input(GRAD).size() == Input(MEAN_SQUARES).size());
    // Fix: was Input(OUTPUT_MOMENTUM) — an OUTPUT tag used to index inputs,
    // correct only because MOMENTUM and OUTPUT_MOMENTUM share enum value 2.
    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENTUM).size());
    // Resize each output once (the original resized OUTPUT_GRAD twice).
    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
    Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES));
    Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
    rmsprop_update<Context>(
        Input(GRAD).size(),
        Input(GRAD).template data<T>(),
        Input(MEAN_SQUARES).template data<T>(),
        Input(MOMENTUM).template data<T>(),
        Output(OUTPUT_GRAD)->template mutable_data<T>(),
        Output(OUTPUT_MEAN_SQUARES)->template mutable_data<T>(),
        Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
        decay_,
        momentum_,
        epsilon_,
        Input(LR).template data<T>(),
        &context_);
    return true;
  }
 protected:
  // NOTE(review): these in-class defaults are always overwritten by the
  // constructor, and epsilon's 1e-8 disagrees with the argument default 1e-5
  // above — kept as-is since they are dead, but worth reconciling upstream.
  T decay_{0.9};
  T momentum_{0.0};
  T epsilon_{1e-8};
  INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR);
  OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM);
};
}
|
GB_unop__abs_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint64_uint64)
// op(A') function: GB (_unop_tran__abs_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): for ABS on uint64 both the cast and the operator are
// the identity, so each present entry is simply copied from Ax to Cx.
GrB_Info GB (_unop_apply__abs_uint64_uint64)
(
uint64_t *Cx,               // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab,  // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
if (Ab == NULL)
{
// full/sparse case: every slot holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = Ax [k] ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b,
// so only slots flagged present in Ab are copied
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab [k])
{
Cx [k] = Ax [k] ;
}
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the (identity) ABS op on
// uint64. The bucket-transpose machinery is in the included template,
// parameterized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__abs_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_uint8
// op(A') function: GB_tran__lnot_uint32_uint8
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = lnot (cast (Ax)): apply logical NOT to every entry.
// For each p: Cx [p] = !(((uint32_t) Ax [p]) != 0), via the GB_CAST_OP macro.
GrB_Info GB_unop__lnot_uint32_uint8
(
    uint32_t *restrict Cx,              // output array, anz entries
    const uint8_t *restrict Ax,         // input array, anz entries
    int64_t anz,                        // number of entries
    int nthreads                        // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;             // operator disabled at compile time
    #else
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose, typecast uint8 -> uint32, and apply !.
// This is phase 2 of the two-phase transpose; the kernel body is included
// from GB_unaryop_transpose.c and driven by the GB_* macros above.
GrB_Info GB_tran__lnot_uint32_uint8
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix
    int64_t *restrict *Rowcounts,       // row counts computed in phase 1
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // slicing of A across tasks
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;             // operator disabled at compile time
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ethereum_fmt_plug.c | /*
* JtR format to crack password protected Ethereum Wallets.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ethereum;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ethereum);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16 // tuned on i7-6600U
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX 1 // hack, we can't use our simd pbkdf2 code for presale wallets because of varying salt
#include "pbkdf2_hmac_sha256.h"
#include "ethereum_common.h"
#include "escrypt/crypto_scrypt.h"
#include "KeccakHash.h"
#include "aes.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_NAME "Ethereum Wallet"
#define FORMAT_LABEL "ethereum"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 16
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)];
custom_salt *cur_salt;
/*
 * Allocate per-run candidate/result buffers.  Under OpenMP the key batch
 * is widened: min scales by the thread count alone, max additionally by
 * the OMP_SCALE tuning factor.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
// Release the buffers allocated by init().
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
// Select the salt used by subsequent crypt_all() calls.
static void set_salt(void *salt)
{
	cur_salt = (custom_salt *)salt;
}
// Store candidate password `key` in slot `index`, truncating to
// PLAINTEXT_LENGTH (strnzcpy always NUL-terminates).
static void ethereum_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
// Return the stored candidate password for slot `index`.
static char *get_key(int index)
{
	return saved_key[index];
}
// Padding block for presale wallets; only its first byte (0x02) is ever
// hashed (see the Keccak_HashUpdate(&hash, dpad, 1 * 8) call in crypt_all).
static unsigned char *dpad = (unsigned char*)"\x02\x00\x00\x00\x00\x00\x00\x00";
/*
 * Compute verification hashes for `count` queued candidate passwords.
 *
 * Key derivation depends on the wallet type stored in cur_salt:
 *   type 0: PBKDF2-HMAC-SHA256(password, salt)      -> 32-byte master key
 *   type 1: scrypt(password, salt, N, r, p)         -> 32-byte master key
 *   type 2: PBKDF2-HMAC-SHA256(password, password)  -> 16-byte key (presale)
 * For types 0/1 the result is Keccak-256(master[16..31] || ciphertext).
 * For type 2 the AES-128-CBC encseed is decrypted, its PKCS padding is
 * validated, and the result is Keccak-256(seed || 0x02).
 *
 * Bug fix: the candidate loop was enclosed in #ifdef _OPENMP, so builds
 * without OpenMP executed the block exactly once and hashed only the first
 * MAX_KEYS_PER_CRYPT candidates.  The loop now always runs; only the
 * pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int i;

		if (cur_salt->type == 0) {
#ifdef SIMD_COEF_64
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				pout[i] = master[i];
			}
			pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);
#else
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i],
						strlen(saved_key[index+i]),
						cur_salt->salt, cur_salt->saltlen,
						cur_salt->iterations, master[i], 32,
						0);
#endif
		} else if (cur_salt->type == 1) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				crypto_scrypt((unsigned char *)saved_key[index+i],
						strlen(saved_key[index+i]),
						cur_salt->salt,
						cur_salt->saltlen, cur_salt->N,
						cur_salt->r, cur_salt->p,
						master[i], 32);
		} else if (cur_salt->type == 2) {
			/* presale wallet: password is both key material and salt */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i],
						strlen(saved_key[index+i]),
						(unsigned char *)saved_key[index+i],
						strlen(saved_key[index+i]),
						2000, master[i], 16, 0);
		}
		if (cur_salt->type == 0 || cur_salt->type == 1) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				Keccak_HashInstance hash;
				/* delimitedSuffix is 0x06 for SHA-3 and 0x01 for (legacy) Keccak */
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
				Keccak_HashUpdate(&hash, master[i] + 16, 16 * 8);
				Keccak_HashUpdate(&hash, cur_salt->ct, cur_salt->ctlen * 8);
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		} else {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				AES_KEY akey;
				Keccak_HashInstance hash;
				unsigned char iv[16];
				unsigned char seed[4096];
				int padbyte;
				int datalen;

				AES_set_decrypt_key(master[i], 128, &akey);
				memcpy(iv, cur_salt->encseed, 16);  /* first 16 bytes are the IV */
				AES_cbc_encrypt(cur_salt->encseed + 16, seed, cur_salt->eslen - 16, &akey, iv, AES_DECRYPT);
				/* bad PKCS padding => wrong password; record a null hash */
				if (check_pkcs_pad(seed, cur_salt->eslen - 16, 16) < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				padbyte = seed[cur_salt->eslen - 16 - 1];
				datalen = cur_salt->eslen - 16 - padbyte;
				if (datalen < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
				Keccak_HashUpdate(&hash, seed, datalen * 8);
				Keccak_HashUpdate(&hash, dpad, 1 * 8);
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		}
	}
	return count;
}
/* Quick screen: does any computed hash match `binary` in its first 32 bits? */
static int cmp_all(void *binary, int count)
{
	const uint32_t target = ((uint32_t *)binary)[0];
	int i;

	for (i = 0; i < count; i++) {
		if (crypt_out[i][0] == target)
			return 1;
	}
	return 0;
}
// Full BINARY_SIZE comparison for one candidate that passed cmp_all().
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// Nothing further to verify: cmp_one() already compared the full binary.
static int cmp_exact(char *source, int index)
{
	return 1;
}
// John-the-Ripper format descriptor: parameters and method table wiring the
// functions above into the "ethereum" cracking format.
struct fmt_main fmt_ethereum = {
	{ // format parameters
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                   // minimum plaintext length
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ // tunable-cost names, reported per salt
			"iteration count",
		},
		{ FORMAT_TAG },
		ethereum_tests
	}, { // methods
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		ethereum_common_valid,
		fmt_default_split,
		ethereum_get_binary,
		ethereum_common_get_salt,
		{ // tunable-cost extractors, parallel to the names above
			ethereum_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		ethereum_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
kde_nsw.h | //
// Created by xinyan on 3/4/2020.
//
#ifndef SSKDE_KDE_NSW_H
#define SSKDE_KDE_NSW_H
#include <hnswlib/hnswlib.h>
#include <hnswlib/space_l2.h>
using hnswlib::L2Space;
using hnswlib::HierarchicalNSW;
// Reinterpret a const S* as a mutable R* in two explicit steps:
// first the type change, then the constness removal.
template <typename R, typename S>
R* const_reinterpret_cast(const S* x) {
  const R* typed = reinterpret_cast<const R*>(x);
  return const_cast<R*>(typed);
}
// Approximate kernel-density estimation backed by an HNSW nearest-neighbour
// graph: density at a query is estimated from its k nearest indexed points.
// NOTE(review): `T` is not declared anywhere in this header — presumably a
// scalar typedef (e.g. float) defined before inclusion, or this class is
// missing a `template <typename T>` header; confirm against the caller.
class GraphKDE {
public:
// Build the index over n points of dimension d, stored row-major in x.
// HNSW parameters: M = 16 links per node, efConstruction = 128.
GraphKDE(const T* x, int n, int d): n_(n), d_(d), space_((size_t) d),
hnsw_(&space_, (size_t) n, 16, 128) {
// Insert the first point serially so the graph has an entry point,
// then add the remaining points in parallel.
hnsw_.addPoint(const_reinterpret_cast<void, T>(x), (size_t) 0);
#pragma omp parallel for
for (int i = 1; i < n; i++) {
hnsw_.addPoint(const_reinterpret_cast<void, T>(x + d * i), (size_t) i);
}
hnsw_.setEf(64);  // search-time candidate-list size
}
// Density estimate at q: sum of gaussian_kernel(distance, 1.0f) over the
// k nearest neighbours, normalised by the total point count n_ (not k).
T query(const T* q, size_t k) {
auto result = hnsw_.searchKnn(q, k);
T density = 0.0;
while (!result.empty()) {
density += gaussian_kernel(result.top().first, 1.0f);
result.pop();
}
return density / n_;
}
private:
int n_;          // number of indexed points
int d_;          // point dimensionality
L2Space space_;  // (squared) L2 distance space for HNSW
HierarchicalNSW<T > hnsw_;
};
#endif // SSKDE_KDE_NSW_H
|
homomorphic_functions.c | long long int sk[NUM_PRIME][4096];
long long int pk0[NUM_PRIME][4096], pk1[NUM_PRIME][4096];
long long int rlk00[NUM_PRIME][4096], rlk01[NUM_PRIME][4096], rlk10[NUM_PRIME][4096], rlk11[NUM_PRIME][4096];
long long int rlk20[NUM_PRIME][4096], rlk21[NUM_PRIME][4096], rlk30[NUM_PRIME][4096], rlk31[NUM_PRIME][4096], rlk40[NUM_PRIME][4096], rlk41[NUM_PRIME][4096];
//mpz_t quotient[THREADS], rem[THREADS];
//mpz_t temp_array64[THREADS];
//mpz_t chunk[THREADS];
//mpz_t temp_array512[THREADS];
/*
 * Read one key polynomial (4096 decimal big integers, one per coefficient)
 * from `path` into the shared scratch array, then reduce every coefficient
 * modulo each RNS prime into dest[prime][coeff] via compute_mod().
 *
 * Fix: fopen() was previously unchecked — a missing key file caused a NULL
 * dereference inside gmp_fscanf.  Now fails loudly instead.
 */
static void read_key_poly(const char *path, mpz_t big_array[], long long int dest[][4096])
{
	FILE *fp;
	int i;

	fp = fopen(path, "r");
	if (fp == NULL)
	{
		fprintf(stderr, "read_keys: cannot open key file '%s'\n", path);
		exit(1);
	}
	for (i = 0; i < 4096; i++)
		gmp_fscanf(fp, "%Zd", big_array[i]);
	fclose(fp);
	for (i = 0; i < NUM_PRIME; i++)
		compute_mod(big_array, dest[i], i);
}

/*
 * Load the public key (pk0, pk1), secret key (sk) and relinearisation keys
 * (rlk00..rlk11) from the sage_generated_key/ directory, reduce them into
 * RNS residues, and pre-transform every key into the NTT domain so that
 * later multiplications can use pointwise products directly.
 *
 * rlk20..rlk41 are never read from disk; they stay zero-filled and their
 * NTT is still computed (a transform of zero is zero), preserving the
 * original behaviour.
 */
void read_keys()
{
	int i;
	static mpz_t big_array[4096];

	mpz_array_init(big_array[0], 4096, 256);

	/* public and secret keys */
	read_key_poly("sage_generated_key/pk0", big_array, pk0);
	read_key_poly("sage_generated_key/pk1", big_array, pk1);
	read_key_poly("sage_generated_key/sk",  big_array, sk);

	/* relinearisation key components */
	read_key_poly("sage_generated_key/rlk0_0", big_array, rlk00);
	read_key_poly("sage_generated_key/rlk0_1", big_array, rlk01);
	read_key_poly("sage_generated_key/rlk1_0", big_array, rlk10);
	read_key_poly("sage_generated_key/rlk1_1", big_array, rlk11);

	/* pre-compute the NTT of every key, per RNS prime */
	for (i = 0; i < NUM_PRIME; i++)
	{
		fwd_ntt_q(pk0[i], i);
		fwd_ntt_q(pk1[i], i);
		fwd_ntt_q(sk[i], i);
		fwd_ntt_q(rlk00[i], i);
		fwd_ntt_q(rlk01[i], i);
		fwd_ntt_q(rlk10[i], i);
		fwd_ntt_q(rlk11[i], i);
		fwd_ntt_q(rlk20[i], i);
		fwd_ntt_q(rlk21[i], i);
		fwd_ntt_q(rlk30[i], i);
		fwd_ntt_q(rlk31[i], i);
		fwd_ntt_q(rlk40[i], i);
		fwd_ntt_q(rlk41[i], i);
	}
}
// Refresh a ciphertext in place: decrypt (c0, c1) to plaintext m, then
// re-encrypt m back into (c0, c1), resetting the accumulated noise.
// NOTE(review): FV_dec_q uses the secret key, so this is a trusted-party /
// debugging recrypt, not bootstrapping.
void FV_recrypt(long long int c0[][4096], long long int c1[][4096])
{
	int m[4096];
	FV_dec_q(m, c0, c1);
	FV_enc_q(m, c0, c1);
}
// FV encryption in RNS form.
//   m      : plaintext coefficients (4096 of them)
//   c0, c1 : output ciphertext residues, one row per RNS prime
// Computes c0 = pk0*u + e1 + encode(m) and c1 = pk1*u + e2 modulo each
// prime, where u is a uniform ternary polynomial and e1, e2 are error
// polynomials from the Knuth-Yao sampler.  pk0/pk1 are already in the NTT
// domain (transformed once in read_keys), so only u is transformed here.
void FV_enc_q(int m[], long long int c0[][4096], long long int c1[][4096])
{
	int i, j, r;
	long long int primrt;  // unused; kept for source compatibility
	long long int m_encoded[4096], e1[4096], e2[4096], u[4096], u_copy[4096], pk0_mul_u[4096], pk1_mul_u[4096], e1_plus_m_encoded[4096];
	// sample the two error polynomials
	knuth_yao(e1);
	knuth_yao(e2);
	// u: coefficients drawn uniformly from {-1, 0, 1}
	for(i=0; i<4096; i++)
	{
		r = rand() % 2;
		if(rand()%2==1)
			r = -r;
		u[i] = r;
	}
	for(i=0; i<NUM_PRIME; i++)
	{
		// scale the message by pby_t[i] (presumably the q/t encoding
		// factor for this prime — confirm against the key-gen code)
		for(j=0; j<4096; j++)
			m_encoded[j] = m[j] * pby_t[i];
		// u must be copied: the NTT works in place and u is reused per prime
		poly_copy(u, u_copy);
		fwd_ntt_q(u_copy, i);
		//fwd_ntt_q(pk0[i], i);
		//fwd_ntt_q(pk1[i], i);
		coefficient_mul_q(pk0[i], u_copy, pk0_mul_u, i);
		coefficient_mul_q(pk1[i], u_copy, pk1_mul_u, i);
		inv_ntt_q(pk0_mul_u, i); // pk0_mul_u <-- pk0*u
		inv_ntt_q(pk1_mul_u, i); // pk1_mul_u <-- pk1*u
		coefficient_add_q(e1, m_encoded, e1_plus_m_encoded, i); // e1_plus_m_encoded <-- m_encoded + e1
		coefficient_add_q(pk0_mul_u, e1_plus_m_encoded, c0[i], i); // c0[i] <-- pk0*u + e1 + m_encoded
		coefficient_add_q(pk1_mul_u, e2, c1[i], i); // c1[i] <-- pk1*u + e2
	}
}
/*
void create_crt_rom(mpz_t q[], int length)
{
int i, j;
mpz_t q_full, Ni, Ni_inv, temp;
mpz_init(q_full);
mpz_init(Ni);
mpz_init(Ni_inv);
mpz_init(temp);
mpz_t mask;
mpz_init(mask);
mpz_set_str(q_full, "1", 10);
mpz_set_str(mask, "262143", 10);
for(i=0; i<length; i++)
mpz_mul(q_full, q_full, q[i]);
for(j=0; j<length; j++)
{
mpz_fdiv_q(Ni, q_full, q[j]);
mpz_invert(Ni_inv, Ni, q[j]);
gmp_printf("mux8_18bits rom(18'd%Zd, ", Ni_inv);
for(i=0; i<length-1; i++)
{
mpz_and(temp, Ni, mask);
if(i<length-2)
gmp_printf("18'd%Zd, ", temp);
else
gmp_printf("18'd%Zd, 18'd0, 18'd0, address, dataout);\n\n", temp);
mpz_sub(Ni, Ni, temp);
mpz_fdiv_q_2exp(Ni, Ni, 18);
}
}
}
*/
/*
 * CRT reconstruction over the 7-prime RNS base: combine the residues
 * c0[j][i] (j = prime index) into full-precision coefficients
 *   c0_full[i] = ( sum_j c0[j][i] * N_j * N_j^{-1} ) mod p_full_length7.
 *
 * Fixes: the mpz temporary was initialised but never cleared (leaked on
 * every call) and two locals (thread_num, and the dead OpenMP scaffolding)
 * were unused.
 */
void inverse_crt_length7(long long int c0[][4096], mpz_t c0_full[])
{
	int i, j;
	mpz_t temp;

	mpz_init(temp);
	for(i=0; i<4096; i++)
	{
		for(j=0; j<NUM_PRIME; j++)
		{
			mpz_mul_ui(temp, Ni_length7[j], c0[j][i]);
			mpz_mul(temp, temp, Ni_inv_length7[j]);
			mpz_mod(temp, temp, p_full_length7); /* temp = c0[j][i]*Ni*Ni_inv mod q_full */
			if(j==0)
				mpz_set(c0_full[i], temp);       /* first term overwrites any stale value */
			else
				mpz_add(c0_full[i], c0_full[i], temp);
		}
		mpz_mod(c0_full[i], c0_full[i], p_full_length7);
	}
	mpz_clear(temp);
}
/*
 * CRT reconstruction over the extended (NUM_PRIME_EXT) RNS base used
 * during multiplication; same algorithm as inverse_crt_length7 but with
 * the length-15 tables and modulus.
 *
 * Fixes: leaked mpz temporary (never cleared) and an unused local.
 */
void inverse_crt_length15(long long int c0[][4096], mpz_t c0_full[])
{
	int i, j;
	mpz_t temp;

	mpz_init(temp);
	for(i=0; i<4096; i++)
	{
		for(j=0; j<NUM_PRIME_EXT; j++)
		{
			mpz_mul_ui(temp, Ni_length15[j], c0[j][i]);
			mpz_mul(temp, temp, Ni_inv_length15[j]);
			mpz_mod(temp, temp, p_full_length15); /* temp = c0[j][i]*Ni*Ni_inv mod q_full */
			if(j==0)
				mpz_set(c0_full[i], temp);
			else
				mpz_add(c0_full[i], c0_full[i], temp);
		}
		mpz_mod(c0_full[i], c0_full[i], p_full_length15);
	}
	mpz_clear(temp);
}
/*
 * In place, compute a[i] = round(t * a[i] / q) for q = p_full_length7.
 * Negative coefficients are rounded symmetrically: |a[i]| is rounded, then
 * the sign is restored.  Rounding is "add 1 when remainder > q/2".
 *
 * Fixes: the function was declared int but had no return statement (UB if
 * a caller ever used the value) and leaked both mpz temporaries.
 * Returns 0 on success.
 */
int round_tx(mpz_t a[])
{
	int i;
	mpz_t quotient, rem;

	mpz_init(quotient);
	mpz_init(rem);
	for(i=4095; i>=0; i--)
	{
		mpz_mul_ui(a[i], a[i], t); /* a[i] <-- a[i]*t */
		if(mpz_cmp_ui(a[i], 0)<0)  /* negative: work on |a[i]|, negate at the end */
		{
			mpz_ui_sub(a[i], 0, a[i]);
			mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
			if(mpz_cmp(rem, p_full_length7_by2)>0)
				mpz_add_ui(quotient, quotient, 1);
			mpz_ui_sub(a[i], 0, quotient);
		}
		else
		{
			mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
			if(mpz_cmp(rem, p_full_length7_by2)>0)
				mpz_add_ui(quotient, quotient, 1);
			mpz_set(a[i], quotient);
		}
	}
	mpz_clear(quotient);
	mpz_clear(rem);
	return 0;
}
/*
 * In place, compute a[i] = round(t * a[i] / q) mod q for q = p_full_length7.
 * Identical to round_tx() except the rounded value is reduced back into
 * [0, q) at the end of each branch.
 *
 * Fixes: missing return statement for an int-declared function and leaked
 * mpz temporaries.  Returns 0 on success.
 */
int round_tx_mod(mpz_t a[])
{
	int i;
	mpz_t quotient, rem;

	mpz_init(quotient);
	mpz_init(rem);
	for(i=0; i<4096; i++)
	{
		mpz_mul_ui(a[i], a[i], t);
		if(mpz_cmp_ui(a[i], 0)<0) /* negative coefficient */
		{
			mpz_ui_sub(a[i], 0, a[i]);
			mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
			if(mpz_cmp(rem, p_full_length7_by2)>0)
				mpz_add_ui(quotient, quotient, 1);
			mpz_ui_sub(a[i], 0, quotient);
			mpz_mod(a[i], a[i], p_full_length7);
		}
		else
		{
			mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
			if(mpz_cmp(rem, p_full_length7_by2)>0)
				mpz_add_ui(quotient, quotient, 1);
			mpz_set(a[i], quotient);
			mpz_mod(a[i], a[i], p_full_length7);
		}
	}
	mpz_clear(quotient);
	mpz_clear(rem);
	return 0;
}
// FV decryption in RNS form: m = round(t * (c0 + c1*sk) / q) mod t.
// WARNING: c1[i] is transformed to the NTT domain in place — callers must
// not expect c1 to remain in coefficient form afterwards.
void FV_dec_q(int m[], long long int c0[][4096], long long int c1[][4096])
{
	int i;
	long long int sk_mul_c1[NUM_PRIME][4096];
	mpz_t c1_full[4096];
	mpz_t temp;
	mpz_array_init(c1_full[0], 4096, 512);
	mpz_init(temp);
	for(i=0; i<NUM_PRIME; i++)
	{
		fwd_ntt_q(c1[i], i);                              // c1 -> NTT domain (in place)
		coefficient_mul_q(sk[i], c1[i], sk_mul_c1[i], i); // sk was NTT'd in read_keys
		inv_ntt_q(sk_mul_c1[i], i);
		coefficient_add_q(c0[i], sk_mul_c1[i], sk_mul_c1[i], i); // sk_mul_c1 <-- c0 + sk_mul_c1
	}
	inverse_crt_length7(sk_mul_c1, c1_full); // lift residues to full precision
	centerlift(c1_full);                     // map into the centered interval
	round_tx(c1_full); // round t*c/q
	for(i=4095; i>=0; i--)
	{
		//if(mpz_cmp(c1_full[i], p_full_length7_by4)>=0 && mpz_cmp(c1_full[i], p_full_length7_by4_mul3)<0)
		//m[i]=1;
		//else
		//m[i]=0;
		mpz_mod_ui(temp, c1_full[i], t); // temp = c1_full[i] % t
		m[i] = mpz_get_ui(temp);
	}
	mpz_clear(c1_full[0]);
}
/*
 * Homomorphic addition: (c0, c1) = (c10 + c20, c11 + c21), componentwise
 * per RNS prime.  Fix: the function was declared int but returned nothing
 * (UB if the value were used); it now returns 0 for success.
 */
int FV_add(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
	int i;

	for(i=0; i<NUM_PRIME; i++)
	{
		poly_add_q(c10[i], c20[i], c0[i], i);
		poly_add_q(c11[i], c21[i], c1[i], i);
	}
	return 0;
}
/*
 * Homomorphic subtraction: (c0, c1) = (c10 - c20, c11 - c21), componentwise
 * per RNS prime.  Fix: missing return for an int-declared function; now
 * returns 0 for success (mirrors FV_add).
 */
int FV_sub(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
	int i;

	for(i=0; i<NUM_PRIME; i++)
	{
		poly_sub_q(c10[i], c20[i], c0[i], i);
		poly_sub_q(c11[i], c21[i], c1[i], i);
	}
	return 0;
}
// Homomorphic multiplication of ciphertexts (c10,c11) * (c20,c21) -> (c0,c1).
// Pipeline: lift both ciphertexts to full precision (CRT), centerlift, map
// into the extended NUM_PRIME_EXT base, form the three tensor components
//   c0 = c10*c20,  c1 = c10*c21 + c11*c20,  c2 = c11*c21
// via NTT pointwise products, scale each by round(t/q) back into the base
// modulus, and finally fold c2 into (c0,c1) with the relinearisation key.
// NOTE(review): declared int but never returns a value; callers must not
// use the return value.
int FV_mul(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
	int i, j, index;
	FILE *fp;
	long long int c10_QL[NUM_PRIME_EXT][4096], c11_QL[NUM_PRIME_EXT][4096], c20_QL[NUM_PRIME_EXT][4096], c21_QL[NUM_PRIME_EXT][4096], c2[NUM_PRIME_EXT][4096];
	long long int c10_mul_c20[NUM_PRIME_EXT][4096], c10_mul_c21[NUM_PRIME_EXT][4096], c11_mul_c20[NUM_PRIME_EXT][4096], c11_mul_c21[NUM_PRIME_EXT][4096];
	mpz_t c10_full[4096], c11_full[4096], c20_full[4096], c21_full[4096];
	mpz_t c0_full[4096], c1_full[4096], c2_full[4096];
	long long int primrt;
	int num_thread;
	mpz_array_init(c10_full[0], 4096, 512);
	mpz_array_init(c11_full[0], 4096, 512);
	mpz_array_init(c20_full[0], 4096, 512);
	mpz_array_init(c21_full[0], 4096, 512);
	mpz_array_init(c0_full[0], 4096, 512);
	mpz_array_init(c1_full[0], 4096, 512);
	mpz_array_init(c2_full[0], 4096, 512);
	/*
	for(i=0; i<NUM_PRIME; i++)
	{
	for(j=0; j<4096; j++)
	{
	c10[i][j] = j + 4096*i;
	c11[i][j] = j + 4096*i;
	c20[i][j] = j + 4096*i;
	c21[i][j] = j + 4096*i;
	}
	}
	*/
	/*
	fp = fopen("c11_shares", "w");
	uint64_t a0, a1;
	uint64_t a;
	for(j=0; j<6; j++)
	{
	for(i=0; i<2048; i++)
	{
	a0 = c21[j][i]; a1 = c21[j][i+2048];
	a = a0 + a1*1073741824;
	if(i==0)
	fprintf(fp, "{%lu,\n", a);
	else if(i!=2047)
	fprintf(fp, "%lu,\n", a);
	else
	fprintf(fp, "%lu},\n", a);
	}
	fprintf(fp, "\n");
	}
	fclose(fp);
	*/
	// Step 1: lift both ciphertexts out of RNS and center them.
	inverse_crt_length7(c10, c10_full);
	inverse_crt_length7(c11, c11_full);
	inverse_crt_length7(c20, c20_full);
	inverse_crt_length7(c21, c21_full);
	centerlift(c10_full);
	centerlift(c11_full);
	centerlift(c20_full);
	centerlift(c21_full);
	// Step 2: re-share in the extended modulus so products don't wrap.
	map_to_QL(c10_full, c10_QL);
	map_to_QL(c11_full, c11_QL);
	map_to_QL(c20_full, c20_QL);
	map_to_QL(c21_full, c21_QL);
	// Step 3: tensor product via NTT pointwise multiplication.
	//#pragma omp parallel for
	for(i=0; i<NUM_PRIME_EXT; i++)
	{
		fwd_ntt_q(c10_QL[i], i);
		fwd_ntt_q(c11_QL[i], i);
		fwd_ntt_q(c20_QL[i], i);
		fwd_ntt_q(c21_QL[i], i);
		coefficient_mul_q(c10_QL[i], c20_QL[i], c10_mul_c20[i], i);
		coefficient_mul_q(c10_QL[i], c21_QL[i], c10_mul_c21[i], i);
		coefficient_mul_q(c11_QL[i], c20_QL[i], c11_mul_c20[i], i);
		coefficient_mul_q(c11_QL[i], c21_QL[i], c11_mul_c21[i], i);
		inv_ntt_q(c10_mul_c20[i], i); // c0[i] = c10*c20 mod q[i]
		poly_copy(c10_mul_c20[i], c0[i]);
		coefficient_add_q(c10_mul_c21[i], c11_mul_c20[i], c1[i], i);
		inv_ntt_q(c1[i], i); // c1[i] = c10*c21 + c11*c20 mod q[i]
		inv_ntt_q(c11_mul_c21[i], i); // c2[i] = c11*c21 mod q[i]
		poly_copy(c11_mul_c21[i], c2[i]);
	}
	/*
	printf("print c2 j=5 \n");
	for(i=0; i<13; i++)
	{
	//for(j=0; j<2048; j++)
	j = 5;
	printf("%ld %ld ", c0[i][j+2048], c0[i][j]);
	}
	printf("print c0 end\n");
	*/
	// Step 4: lift the three components and scale by round(t/q).
	inverse_crt_length15(c0, c0_full);
	inverse_crt_length15(c1, c1_full);
	inverse_crt_length15(c2, c2_full);
	centerlift_QL(c0_full);
	round_tx_mod(c0_full);
	centerlift_QL(c1_full);
	round_tx_mod(c1_full);
	centerlift_QL(c2_full);
	round_tx_mod(c2_full);
	centerlift(c2_full);
	// Step 5: back to RNS shares, then fold c2 in via relinearisation.
	compute_shares(c0_full, c0);
	compute_shares(c1_full, c1);
	FV_relin(c0, c1, c2_full);
	/*
	for(i=0; i<6; i++)
	{
	for(j=0; j<2048; j++)
	{
	printf("%ld %ld\n", c0[i][j+2048], c0[i][j]);
	}
	}
	*/
	/*
	for(i=0; i<6; i++)
	{
	for(j=0; j<2048; j++)
	{
	printf("%ld %ld\n", c0[i][j+2048], c0[i][j]);
	}
	}
	*/
	/*
	fp = fopen("c0_fpga.txt", "r");
	for(i=0; i<6; i++)
	{
	for(j=0; j<2048; j++)
	{
	fscanf(fp, "%ld %ld", &c0[i][j+2048], &c0[i][j]);
	}
	}
	fclose(fp);
	fp = fopen("c1_fpga.txt", "r");
	for(i=0; i<6; i++)
	{
	for(j=0; j<2048; j++)
	{
	fscanf(fp, "%ld %ld", &c1[i][j+2048], &c1[i][j]);
	}
	}
	fclose(fp);
	*/
	mpz_clear(c10_full[0]);
	mpz_clear(c11_full[0]);
	mpz_clear(c20_full[0]);
	mpz_clear(c21_full[0]);
	mpz_clear(c0_full[0]);
	mpz_clear(c1_full[0]);
	mpz_clear(c2_full[0]);
}
// Relinearisation: fold the quadratic ciphertext component c2_full back
// into (c0_shares, c1_shares) using the relinearisation keys:
//   c0 += sum_i rlk_i0 * cwd_i,   c1 += sum_i rlk_i1 * cwd_i
// where cwd_i are the word-decomposition digits of c2.  word_decomp()
// currently produces only two digits (91-bit base), so only cwd0/cwd1 and
// rlk0x/rlk1x are used; the cwd2..cwd4 paths are commented out.
// NOTE(review): declared int but never returns a value.
int FV_relin(long long int c0_shares[][4096], long long int c1_shares[][4096], mpz_t c2_full[])
{
	int i, j;
	mpz_t cwd0[4096], cwd1[4096], cwd2[4096], cwd3[4096], cwd4[4096];
	mpz_array_init(cwd0[0], 4096, 256);
	mpz_array_init(cwd1[0], 4096, 256);
	mpz_array_init(cwd2[0], 4096, 256);
	mpz_array_init(cwd3[0], 4096, 256);
	mpz_array_init(cwd4[0], 4096, 256);
	long long int rlk0_mul_cwd[NUM_PRIME][4096], rlk1_mul_cwd[NUM_PRIME][4096];
	long long int cwd0_shares[NUM_PRIME][4096], cwd1_shares[NUM_PRIME][4096], cwd2_shares[NUM_PRIME][4096], cwd3_shares[NUM_PRIME][4096], cwd4_shares[NUM_PRIME][4096];
	long long int temp[NUM_PRIME][4096];
	word_decomp(c2_full, cwd0, cwd1, cwd2, cwd3, cwd4);
	compute_shares(cwd0, cwd0_shares);
	compute_shares(cwd1, cwd1_shares);
	//compute_shares(cwd2, cwd2_shares);
	//compute_shares(cwd3, cwd3_shares);
	//compute_shares(cwd4, cwd4_shares);
	// Problem found for j = 5;
	// correct value 930507122 606471640
	// received value 700752289 606471640
	/*
	for(i=0; i<1; i++)
	{
	for(j=0; j<2048; j++)
	{
	printf("j=%d\n", j);
	printf("%ld %ld ", cwd0_shares[i][j+2048], cwd0_shares[i][j]);
	}
	}
	*/
	for(i=0; i<NUM_PRIME; i++)
	{
		// rlk keys are already in NTT form (read_keys); only the digit
		// shares need transforming before the pointwise products.
		fwd_ntt_q(cwd0_shares[i], i);
		coefficient_mul_q(rlk00[i], cwd0_shares[i], rlk0_mul_cwd[i], i);
		fwd_ntt_q(cwd1_shares[i], i);
		coefficient_mul_q(rlk10[i], cwd1_shares[i], temp[i], i);
		coefficient_add_q(rlk0_mul_cwd[i], temp[i], rlk0_mul_cwd[i], i); // rlk0_mul_cwd[i] = rlk00[i]*cwd0_shares[i] + rlk10[i]*cwd1_shares[i]
		coefficient_mul_q(rlk01[i], cwd0_shares[i], rlk1_mul_cwd[i], i);
		coefficient_mul_q(rlk11[i], cwd1_shares[i], temp[i], i);
		coefficient_add_q(rlk1_mul_cwd[i], temp[i], rlk1_mul_cwd[i], i);
		inv_ntt_q(rlk0_mul_cwd[i], i);
		inv_ntt_q(rlk1_mul_cwd[i], i);
		coefficient_add_q(c0_shares[i], rlk0_mul_cwd[i], c0_shares[i], i); // c0_shares[i] = c0_shares[i]+ sum[rlk_i0*cwd_i]
		coefficient_add_q(c1_shares[i], rlk1_mul_cwd[i], c1_shares[i], i); // c1_shares[i] = c1_shares[i]+ sum[rlk_i1*cwd_i]
	}
	//printf("Relin c0_shares[0][0]=%ld\n",c0_shares[0][0]);
	mpz_clear(cwd0[0]); mpz_clear(cwd1[0]); mpz_clear(cwd2[0]); mpz_clear(cwd3[0]); mpz_clear(cwd4[0]);
}
// Decompose each coefficient c[i] into base-2^91 digits, reduced mod
// p_full_length7.  Only TWO digits are produced (j < 2), so cwd2..cwd4 are
// never written here — they exist to keep the signature parallel with
// word_decomp_32bit().  Negative coefficients are decomposed as |c[i]| and
// each digit is negated before reduction.
// NOTE(review): declared int but never returns a value; c[] is destroyed.
int word_decomp(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
	int i, j;
	int sign;
	mpz_t mask; mpz_init(mask);
	mpz_set_str(mask, "2475880078570760549798248447", 10); // mask = 2^91 - 1
	mpz_t two_to_32; mpz_init(two_to_32);
	mpz_set_str(two_to_32, "2475880078570760549798248448", 10); // 2^91 (unused: signed-digit path is disabled)
	mpz_t two_to_31; mpz_init(two_to_31);
	mpz_set_str(two_to_31, "1237940039285380274899124224", 10); // 2^90 (unused)
	mpz_t chunk;
	mpz_init(chunk);
	int thread_num;
	//#pragma omp parallel for private(thread_num, sign, j)
	for(i=0; i<4096; i++)
	{
		//thread_num = omp_get_thread_num();
		sign=0;
		if(mpz_cmp_ui(c[i], 0)<0)
		{
			sign = 1;           // remember the sign, work on |c[i]|
			mpz_ui_sub(c[i], 0, c[i]);
		}
		for(j=0; j<2; j++)
		{
			mpz_and(chunk, c[i], mask);     // low 91 bits
			mpz_sub(c[i], c[i], chunk);
			mpz_fdiv_q_2exp(c[i], c[i], 91); // c[i] = c[i]>>91
			/*
			if(mpz_cmp(chunk, two_to_31)>0) // if chunk > 2^90 (signed-digit variant, disabled)
			{
			mpz_sub(chunk, chunk, two_to_32); // chunk = chunk - 2^91
			mpz_add_ui(c[i], c[i], 1);
			}
			*/
			if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
			if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
			if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
			if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7); // unreachable: j < 2
			if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7); // unreachable
			if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7); // unreachable
		}
	}
}
// Decompose each coefficient c[i] into five signed base-2^32 digits
// (balanced representation: digits above 2^31 become negative with a carry
// into the next digit), each reduced mod p_full_length7.
// NOTE(review): declared int but never returns a value; c[] is destroyed.
int word_decomp_32bit(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
	int i, j;
	int sign;
	mpz_t mask; mpz_init(mask);
	mpz_set_str(mask, "4294967295", 10); // mask=2^32-1
	mpz_t two_to_32; mpz_init(two_to_32);
	mpz_set_str(two_to_32, "4294967296", 10);
	mpz_t two_to_31; mpz_init(two_to_31);
	mpz_set_str(two_to_31, "2147483648", 10);
	mpz_t chunk;
	mpz_init(chunk);
	int thread_num;
	//#pragma omp parallel for private(thread_num, sign, j)
	for(i=0; i<4096; i++)
	{
		//thread_num = omp_get_thread_num();
		sign=0;
		if(mpz_cmp_ui(c[i], 0)<0)
		{
			sign = 1;           // remember sign, decompose |c[i]|
			mpz_ui_sub(c[i], 0, c[i]);
		}
		for(j=0; j<5; j++)
		{
			mpz_and(chunk, c[i], mask);      // low 32 bits
			mpz_sub(c[i], c[i], chunk);
			mpz_fdiv_q_2exp(c[i], c[i], 32); // c[i] = c[i]>>32
			if(mpz_cmp(chunk, two_to_31)>0)  // balance the digit: if chunk > 2^31
			{
				mpz_sub(chunk, chunk, two_to_32); // chunk = chunk - 2^32
				mpz_add_ui(c[i], c[i], 1);        // carry into the next digit
			}
			if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
			if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
			if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
			if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7);
			if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7);
			if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7);
		}
	}
}
/*
 * Split full-precision coefficients a[i] into RNS residues:
 * a_shares[j][i] = a[i] mod p[j] for each of the NUM_PRIME primes.
 *
 * Fixes: the mpz temporary was never cleared (leak per call) and the
 * thread_num local plus dead OpenMP scaffolding were unused.
 */
void compute_shares(mpz_t a[], long long int a_shares[][4096])
{
	int i, j;
	mpz_t temp;

	mpz_init(temp);
	for(i=0; i<4096; i++)
	{
		for(j=0; j<NUM_PRIME; j++)
		{
			mpz_mod_ui(temp, a[i], p[j]);        /* always in [0, p[j]) */
			a_shares[j][i] = mpz_get_ui(temp);
		}
	}
	mpz_clear(temp);
}
/*
 * Reduce a full-precision polynomial into a single RNS residue row:
 * b[i] = a[i] mod p[prime_index].
 *
 * Fix: the mpz temporary was initialised but never cleared, leaking on
 * every call (this runs once per prime per key file in read_keys).
 */
void compute_mod(mpz_t a[],long long int b[], int prime_index)
{
	int i;
	mpz_t temp;

	mpz_init(temp);
	for(i=0; i<4096; i++)
	{
		mpz_mod_ui(temp, a[i], p[prime_index]);
		b[i] = mpz_get_ui(temp);
	}
	mpz_clear(temp);
}
/*
 * Map coefficients from [0, q) into the centered range by subtracting
 * q = p_full_length7 from any value above q/2 (in place).
 *
 * Fix: declared int with no return statement; now returns 0 for success.
 */
int centerlift(mpz_t a[])
{
	int i;

	for(i=0; i<4096; i++)
	{
		if(mpz_cmp(a[i], p_full_length7_by2)>0)
			mpz_sub(a[i], a[i], p_full_length7); /* a[i] = a[i]-q */
	}
	return 0;
}
/*
 * Same as centerlift() but over the extended modulus q = p_full_length15
 * used during multiplication.
 *
 * Fix: declared int with no return statement; now returns 0 for success.
 */
int centerlift_QL(mpz_t a[])
{
	int i;

	for(i=0; i<4096; i++)
	{
		if(mpz_cmp(a[i], p_full_length15_by2)>0)
			mpz_sub(a[i], a[i], p_full_length15); /* a[i] = a[i]-q */
	}
	return 0;
}
/*
 * Re-share full-precision coefficients in the extended RNS base: reduce
 * a[i] mod p_full_length15 (in place), then b[j][i] = a[i] mod p[j] for
 * all NUM_PRIME_EXT primes.
 *
 * Fixes: missing return for an int-declared function, leaked mpz
 * temporary, and an unused local.  Returns 0 on success.
 */
int map_to_QL(mpz_t a[], long long int b[][4096])
{
	int i, j;
	mpz_t temp;

	mpz_init(temp);
	for(i=0; i<4096; i++)
	{
		mpz_mod(a[i], a[i], p_full_length15);
		for(j=0; j<NUM_PRIME_EXT; j++)
		{
			mpz_mod_ui(temp, a[i], p[j]);
			b[j][i] = mpz_get_ui(temp);
		}
	}
	mpz_clear(temp);
	return 0;
}
/* c = a * b pointwise (NTT-domain product), reduced via mod() for the
 * selected RNS prime. */
void coefficient_mul_q(long long int a[], long long int b[], long long int c[], int prime_index)
{
	int idx;

	for (idx = 0; idx < 4096; idx++)
		c[idx] = mod(a[idx] * b[idx], prime_index);
}
/* c = a + b pointwise, reduced via mod() for the selected RNS prime. */
void coefficient_add_q(long long int a[], long long int b[], long long int c[], int prime_index)
{
	int idx = 0;

	while (idx < 4096) {
		c[idx] = mod(a[idx] + b[idx], prime_index);
		idx++;
	}
}
/*
 * Generate a test plaintext polynomial: all 4096 coefficients are zeroed
 * and only the constant term is set to a random bit (0 or 1).
 *
 * Fixes: removed unused locals (FILE *fm, r1, r2) and switched random()
 * to rand() for consistency with the rest of this file (FV_enc_q) —
 * random() also requires POSIX declarations not visible under strict C11.
 */
void message_gen(int m[])
{
	int i;

	for(i=0; i<4096; i++)
	{
		m[i] = 0;
	}
	m[0] = rand() % 2;
}
/* Copy all 4096 coefficients of polynomial a into b. */
void poly_copy(long long int a[], long long int b[])
{
	int idx = 0;

	while (idx < 4096) {
		b[idx] = a[idx];
		idx++;
	}
}
/*
void message_encrypt(int m, mpz_t c[])
{
int message[4096];
int i;
for(i=0; i<4096; i++)
message[i] = 0;
message[0] = m;
YASHE_enc(message, c);
}
*/
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Returns 1 if the difference is negative, 0 otherwise.
 * Note: *y is normalized (mutated) as a side effect, matching the classic
 * GNU libc manual example this helper derives from. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec)
	{
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;

		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry the other way if the usec gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int carry = (x->tv_usec - y->tv_usec) / 1000000;

		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* After normalization tv_usec is non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Grid dimensions (each padded by 2 halo cells) and time-step count.
     * BUG FIX: the original left Nx..Nt uninitialized (UB) whenever fewer
     * command-line arguments were supplied; provide safe defaults. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* Two time planes (ping-pong buffers) of an Nz x Ny x Nx grid. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 24;
    tile_size[3] = 32;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;
    // initialize the t=0 plane with reproducible pseudo-random values
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        // order-1 7-point stencil, reading plane t%2, writing (t+1)%2
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the file defines MIN (upper case); the original called an
         * undeclared lower-case min(), which does not compile/link. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
GB_binop__isgt_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int16)
// A*D function (colscale): GB (_AxD__isgt_int16)
// D*A function (rowscale): GB (_DxB__isgt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int16)
// C=scalar+B GB (_bind1st__isgt_int16)
// C=scalar+B' GB (_bind1st_tran__isgt_int16)
// C=A+scalar GB (_bind2nd__isgt_int16)
// C=A'+scalar GB (_bind2nd_tran__isgt_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B with the ISGT int16 operator, where C, A and B are all dense.
    // Body supplied by the template (auto-generated file: do not hand-edit).
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate a sparse matrix B into a dense C with the ISGT
    // int16 operator; B_ek_slicing partitions B's entries across tasks.
    // Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
    // dense matrix C with the ISGT int16 operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block above already returned (generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: column scale of A by diagonal matrix D using the ISGT int16
    // operator; results are written directly into C's value array C->x.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B: row scale of B by diagonal matrix D using the ISGT int16
    // operator; results are written directly into C's value array C->x.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the ISGT int16 operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta "fill" scalars, only read when is_eWiseUnion is true
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isgt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the
    // ISGT int16 operator, where C is sparse or hypersparse.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
    // bitmap/full, using the ISGT int16 operator. For this operator
    // GB_BINOP_FLIP is 0, so only the non-flipped branch is compiled.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The op is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
    // A and B are bitmap/full, using the ISGT int16 operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a
    // bitmap, using the ISGT int16 operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = (x > Bx [p]) for each of the bnz entries of B, binding the
    // scalar x as the first operand; Bb is B's bitmap (entries the GBB test
    // reports absent are skipped).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (Ax [p] > y) for each of the anz entries of A, binding the
    // scalar y as the second operand; Ab is A's bitmap (entries the GBB
    // test reports absent are skipped).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A and apply cij = (x > aij), with the
    // scalar x bound as the first operand (see GB_CAST_OP above).
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (generated boilerplate)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A and apply cij = (aij > y), with the
    // scalar y bound as the second operand (see GB_CAST_OP above).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_hello.c | /* a simple openMP program */
#include <stdio.h>
#include <omp.h>
int main(int argc, char *argv[]){
    /* Sentinel value; the master thread replaces it with the real team size. */
    int nthreads = 999999;
    /* The team size could also be forced with omp_set_num_threads(4) before
     * the region, or a num_threads(4) clause on the pragma below. */
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
        /* Wait for the master's write before every thread reads nthreads. */
#pragma omp barrier
        printf("Hello from thread %d nthread %d\n", tid, nthreads);
    }
    return 0;
}
|
reduction2.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Sum a[0..n-1] (n capped at 20) on top of an initial value of 10, using
 * per-thread partial sums combined inside a critical section.
 * Fixes: `main` had an implicit int return type (invalid since C99) and
 * never returned a value; the VLA a[n] is now a plain a[20] (n <= 20). */
int main(int argc, char **argv) {
    int i, n=20, a[20], sumalocal, suma=10;
    if(argc < 2) {
        fprintf(stderr,"Falta iteraciones\n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n>20) {n=20; printf("n=%d",n);}
    for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel private(sumalocal)
    {
        sumalocal=0;   /* per-thread partial sum */
#pragma omp for
        for (i=0; i<n; i++)
            sumalocal += a[i];
        /* serialize the combine step to avoid a race on suma */
#pragma omp critical
        suma = suma+sumalocal;
    }
    printf("Tras 'parallel' suma=%d\n",suma);
    return 0;
}
|
DRB013-nowait-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Some threads may finish the for loop early and execute errors = dt[9]+1
while another thread may still be simultaneously executing
the for worksharing region by writing to d[9], causing data races.
Data race pair: a[i]@72:7 vs. a[9]@75:13.
*/
#include <stdio.h>
#include <omp.h>
int main()
{
    // DataRaceBench kernel DRB013 ("nowait"): this program intentionally
    // demonstrates a data race — the parallel structure must not be "fixed".
    int i;
    int error;
    int len = 1000;
    int a[len];
    int b = 5;
    // initialize a[i] = i in parallel
#pragma omp parallel for private (i)
    for (i = 0; i <= len - 1; i += 1) {
        a[i] = i;
    }
    {
        // scale and shift every element; len and b are firstprivate copies
#pragma omp parallel for private (i) firstprivate (len,b)
        for (i = 0; i <= len - 1; i += 1) {
            a[i] = b + a[i] * 5;
        }
        // NOTE(review): the file header describes a race between the writes
        // above and this read of a[9] via a nowait clause, but in this
        // variant the read follows the parallel for's implicit barrier —
        // confirm against the original DRB013 source.
        error = a[9] + 1;
    }
    printf("error = %d\n",error);
    return 0;
}
|
quick_gemm.h | // Quick gemm if alpha==0.0
/* Fast path for gemm when alpha == 0: C only needs zeroing or scaling by
 * beta.
 * BUG FIX: the original used a bare "#pragma omp parallel", which makes
 * EVERY thread execute the entire loop nest. For the beta-scaling branches
 * that multiplies each C entry by beta once per thread, concurrently — a
 * data race and a wrong result. "parallel for" worksharing splits the jc
 * loop across the team instead. */
if ( alpha==zero ) {
    if ( beta==zero )
        if ( orderC=='C' ) {
            #pragma omp parallel for
            for (int jc=0; jc<n; jc++ )
                for (int ic=0; ic<m; ic++ )
                    Ccol(ic,jc) = 0.0;
        }
        else {
            #pragma omp parallel for
            for (int jc=0; jc<n; jc++ )
                for (int ic=0; ic<m; ic++ )
                    Crow(ic,jc) = 0.0;
        }
    else
        if ( orderC=='C' ) {
            #pragma omp parallel for
            for (int jc=0; jc<n; jc++ )
                for (int ic=0; ic<m; ic++ )
                    Ccol(ic,jc) = beta*Ccol(ic,jc);
        }
        else {
            #pragma omp parallel for
            for (int jc=0; jc<n; jc++ )
                for (int ic=0; ic<m; ic++ )
                    Crow(ic,jc) = beta*Crow(ic,jc);
        }
    return;
}
|
aurora_runtime_kernels.c | #include <omp.h>
#include <pthread.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
void
ve_init()
{
    /* Pin each OpenMP worker thread to the CPU whose index matches its
     * thread id, giving the runtime a stable thread->core mapping. */
#pragma omp parallel
    {
        /* BUG FIX: bare `auto tx = ...` is not a valid declaration in
         * C99/C11 (`auto` alone implies int only in C89). */
        int tx = omp_get_thread_num();
        cpu_set_t set;
        CPU_ZERO(&set);
        /* BUG FIX: the original `set.__bits[0] = 1 << tx` shifted a 32-bit
         * int (UB for tx >= 31) and could never address CPUs beyond the
         * first mask word; CPU_SET handles any CPU index and avoids poking
         * the glibc-internal __bits field. */
        CPU_SET(tx, &set);
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
    }
}
void
ve_close()
{
    /* Shut down the VE-side runtime by terminating the process; never returns. */
    exit(0);
}
uint64_t
ve_helper_malloc(const size_t sz)
{
    /* Allocate sz bytes and hand the address back as a 64-bit integer
     * handle (released later via ve_helper_free). */
    void *block = malloc(sz);
    return (uint64_t)block;
}
void
ve_helper_free(uint64_t addr)
{
    /* Release a block previously handed out by ve_helper_malloc. */
    void *block = (void *)addr;
    free(block);
}
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    // Repack the int8 convolution weights into the interleaved layout read
    // by the im2col-sgemm kernels: output channels grouped by 8 (aarch64
    // only), then by 4, then singly for the remainder.
    const signed char* kernel = _kernel;
#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4, (size_t)1u);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u);
#endif
    int nn_outch = 0;
    int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
    // groups of 8 output channels: element q of channels p..p+7 is stored
    // contiguously in the packed buffer
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;
        const signed char* k0 = kernel + (p+0)*inch*kernel_size;
        const signed char* k1 = kernel + (p+1)*inch*kernel_size;
        const signed char* k2 = kernel + (p+2)*inch*kernel_size;
        const signed char* k3 = kernel + (p+3)*inch*kernel_size;
        const signed char* k4 = kernel + (p+4)*inch*kernel_size;
        const signed char* k5 = kernel + (p+5)*inch*kernel_size;
        const signed char* k6 = kernel + (p+6)*inch*kernel_size;
        const signed char* k7 = kernel + (p+7)*inch*kernel_size;
        signed char* ktmp = kernel_tm.channel(p/8);
        for (int q=0; q<inch*kernel_size; q++)
        {
            // interleave one element from each of the 8 channels
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
#endif
    // groups of 4 output channels (all channels on 32-bit ARM; the tail of
    // the 8-groups on aarch64)
    nn_outch = (outch - remain_outch_start) >> 2;
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;
        const signed char* k0 = kernel + (p+0)*inch*kernel_size;
        const signed char* k1 = kernel + (p+1)*inch*kernel_size;
        const signed char* k2 = kernel + (p+2)*inch*kernel_size;
        const signed char* k3 = kernel + (p+3)*inch*kernel_size;
#if __ARM_NEON && __aarch64__
        signed char* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        signed char* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__
        for (int q=0; q<inch*kernel_size; q++)
        {
            // interleave one element from each of the 4 channels
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }
    // leftover single output channels are copied through unchanged
    remain_outch_start += nn_outch << 2;
    for (int p=remain_outch_start; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*kernel_size;
#if __ARM_NEON && __aarch64__
        signed char* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        signed char* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
static void conv_im2col_sgemm_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, \
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// im2col
Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, 1UL, opt.workspace_allocator);
{
const int stride = kernel_h*kernel_w*outw*outh;
signed char* ret = (signed char*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<inch; p++)
{
const signed char* input = bottom_blob.channel(p);
int retID = stride * p;
for (int u=0; u<kernel_h; u++)
{
for (int v=0; v<kernel_w; v++)
{
for (int i=0; i<outh; i++)
{
for (int j=0; j<outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 8 x 8
Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, (size_t)1u, opt.workspace_allocator);
{
int nn_size = out_size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_im2col.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/8);
for (int q=0; q<inch*kernel_size; q++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "cc", "memory", "v0"
);
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "cc", "memory", "d0"
);
#endif // __aarch64__
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __ARM_NEON
tmpptr += 8;
img0 += out_size;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<out_size; i++)
{
const signed char* img0 = bottom_im2col.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/8 + i%8);
for (int q=0; q<inch*kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
{
//int M = outch; // outch
int N = outw * outh; // outsize or out stride
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = pp * 8;
int* output0 = top_blob.channel(i);
int* output1 = top_blob.channel(i+1);
int* output2 = top_blob.channel(i+2);
int* output3 = top_blob.channel(i+3);
int* output4 = top_blob.channel(i+4);
int* output5 = top_blob.channel(i+5);
int* output6 = top_blob.channel(i+6);
int* output7 = top_blob.channel(i+7);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
const signed char* va = kernel_tm.channel(i/8);
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"eor v18.16b, v18.16b, v18.16b \n" // sum1
"eor v19.16b, v19.16b, v19.16b \n" // sum1n
"eor v20.16b, v20.16b, v20.16b \n" // sum2
"eor v21.16b, v21.16b, v21.16b \n" // sum2n
"eor v22.16b, v22.16b, v22.16b \n" // sum3
"eor v23.16b, v23.16b, v23.16b \n" // sum3n
"eor v24.16b, v24.16b, v24.16b \n" // sum4
"eor v25.16b, v25.16b, v25.16b \n" // sum4n
"eor v26.16b, v26.16b, v26.16b \n" // sum5
"eor v27.16b, v27.16b, v27.16b \n" // sum5n
"eor v28.16b, v28.16b, v28.16b \n" // sum6
"eor v29.16b, v29.16b, v29.16b \n" // sum6n
"eor v30.16b, v30.16b, v30.16b \n" // sum7
"eor v31.16b, v31.16b, v31.16b \n" // sum7n
"lsr w4, %w20, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%8], #32 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v1.8h, v1.8b, #0 \n" // k01 - k71
"sshll v2.8h, v2.8b, #0 \n" // k02 - k72
"sshll v3.8h, v3.8b, #0 \n" // k03 - k73
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40
"smlal2 v25.4s, v8.8h, v0.h[4] \n"//
"smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50
"smlal2 v27.4s, v8.8h, v0.h[5] \n"//
"smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60
"smlal2 v29.4s, v8.8h, v0.h[6] \n"//
"smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70
"smlal2 v31.4s, v8.8h, v0.h[7] \n"//
// k1
"smlal v16.4s, v9.4h, v1.h[0] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v1.h[0] \n"//
"smlal v18.4s, v9.4h, v1.h[1] \n"// sum1 += (a01-a71) * k11
"smlal2 v19.4s, v9.8h, v1.h[1] \n"//
"smlal v20.4s, v9.4h, v1.h[2] \n"// sum2 += (a01-a71) * k21
"smlal2 v21.4s, v9.8h, v1.h[2] \n"//
"smlal v22.4s, v9.4h, v1.h[3] \n"// sum3 += (a01-a71) * k31
"smlal2 v23.4s, v9.8h, v1.h[3] \n"//
"smlal v24.4s, v9.4h, v1.h[4] \n"// sum4 += (a01-a71) * k41
"smlal2 v25.4s, v9.8h, v1.h[4] \n"//
"smlal v26.4s, v9.4h, v1.h[5] \n"// sum5 += (a01-a71) * k51
"smlal2 v27.4s, v9.8h, v1.h[5] \n"//
"smlal v28.4s, v9.4h, v1.h[6] \n"// sum6 += (a01-a71) * k61
"smlal2 v29.4s, v9.8h, v1.h[6] \n"//
"smlal v30.4s, v9.4h, v1.h[7] \n"// sum7 += (a01-a71) * k71
"smlal2 v31.4s, v9.8h, v1.h[7] \n"//
// k2
"smlal v16.4s, v10.4h, v2.h[0] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v2.h[0] \n"//
"smlal v18.4s, v10.4h, v2.h[1] \n"// sum1 += (a02-a72) * k12
"smlal2 v19.4s, v10.8h, v2.h[1] \n"//
"smlal v20.4s, v10.4h, v2.h[2] \n"// sum2 += (a02-a72) * k22
"smlal2 v21.4s, v10.8h, v2.h[2] \n"//
"smlal v22.4s, v10.4h, v2.h[3] \n"// sum3 += (a02-a72) * k32
"smlal2 v23.4s, v10.8h, v2.h[3] \n"//
"smlal v24.4s, v10.4h, v2.h[4] \n"// sum4 += (a02-a72) * k42
"smlal2 v25.4s, v10.8h, v2.h[4] \n"//
"smlal v26.4s, v10.4h, v2.h[5] \n"// sum5 += (a02-a72) * k52
"smlal2 v27.4s, v10.8h, v2.h[5] \n"//
"smlal v28.4s, v10.4h, v2.h[6] \n"// sum6 += (a02-a72) * k62
"smlal2 v29.4s, v10.8h, v2.h[6] \n"//
"smlal v30.4s, v10.4h, v2.h[7] \n"// sum7 += (a02-a72) * k72
"smlal2 v31.4s, v10.8h, v2.h[7] \n"//
// k3
"smlal v16.4s, v11.4h, v3.h[0] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v3.h[0] \n"//
"smlal v18.4s, v11.4h, v3.h[1] \n"// sum1 += (a03-a73) * k13
"smlal2 v19.4s, v11.8h, v3.h[1] \n"//
"smlal v20.4s, v11.4h, v3.h[2] \n"// sum2 += (a03-a73) * k23
"smlal2 v21.4s, v11.8h, v3.h[2] \n"//
"smlal v22.4s, v11.4h, v3.h[3] \n"// sum3 += (a03-a73) * k33
"smlal2 v23.4s, v11.8h, v3.h[3] \n"//
"smlal v24.4s, v11.4h, v3.h[4] \n"// sum4 += (a03-a73) * k43
"smlal2 v25.4s, v11.8h, v3.h[4] \n"//
"smlal v26.4s, v11.4h, v3.h[5] \n"// sum5 += (a03-a73) * k53
"smlal2 v27.4s, v11.8h, v3.h[5] \n"//
"smlal v28.4s, v11.4h, v3.h[6] \n"// sum6 += (a03-a73) * k63
"smlal2 v29.4s, v11.8h, v3.h[6] \n"//
"smlal v30.4s, v11.4h, v3.h[7] \n"// sum7 += (a03-a73) * k73
"smlal2 v31.4s, v11.8h, v3.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b}, [%9], #8 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.8b}, [%8], #8 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40
"smlal2 v25.4s, v8.8h, v0.h[4] \n"//
"smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50
"smlal2 v27.4s, v8.8h, v0.h[5] \n"//
"smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60
"smlal2 v29.4s, v8.8h, v0.h[6] \n"//
"smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70
"smlal2 v31.4s, v8.8h, v0.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
"st1 {v18.4s, v19.4s}, [%1] \n"
"st1 {v20.4s, v21.4s}, [%2] \n"
"st1 {v22.4s, v23.4s}, [%3] \n"
"st1 {v24.4s, v25.4s}, [%4] \n"
"st1 {v26.4s, v27.4s}, [%5] \n"
"st1 {v28.4s, v29.4s}, [%6] \n"
"st1 {v30.4s, v31.4s}, [%7] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(output4), // %4
"=r"(output5), // %5
"=r"(output6), // %6
"=r"(output7), // %7
"=r"(vb), // %8
"=r"(va) // %9
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(output4),
"5"(output5),
"6"(output6),
"7"(output7),
"8"(vb),
"9"(va),
"r"(L) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else
int sum0[8] = {0};
int sum1[8] = {0};
int sum2[8] = {0};
int sum3[8] = {0};
int sum4[8] = {0};
int sum5[8] = {0};
int sum6[8] = {0};
int sum7[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
sum4[n] += (int)va[4] * vb[n];
sum5[n] += (int)va[5] * vb[n];
sum6[n] += (int)va[6] * vb[n];
sum7[n] += (int)va[7] * vb[n];
va += 8;
sum0[n] += (int)va[0] * vb[n+8];
sum1[n] += (int)va[1] * vb[n+8];
sum2[n] += (int)va[2] * vb[n+8];
sum3[n] += (int)va[3] * vb[n+8];
sum4[n] += (int)va[4] * vb[n+8];
sum5[n] += (int)va[5] * vb[n+8];
sum6[n] += (int)va[6] * vb[n+8];
sum7[n] += (int)va[7] * vb[n+8];
va += 8;
sum0[n] += (int)va[0] * vb[n+16];
sum1[n] += (int)va[1] * vb[n+16];
sum2[n] += (int)va[2] * vb[n+16];
sum3[n] += (int)va[3] * vb[n+16];
sum4[n] += (int)va[4] * vb[n+16];
sum5[n] += (int)va[5] * vb[n+16];
sum6[n] += (int)va[6] * vb[n+16];
sum7[n] += (int)va[7] * vb[n+16];
va += 8;
sum0[n] += (int)va[0] * vb[n+24];
sum1[n] += (int)va[1] * vb[n+24];
sum2[n] += (int)va[2] * vb[n+24];
sum3[n] += (int)va[3] * vb[n+24];
sum4[n] += (int)va[4] * vb[n+24];
sum5[n] += (int)va[5] * vb[n+24];
sum6[n] += (int)va[6] * vb[n+24];
sum7[n] += (int)va[7] * vb[n+24];
va += 8;
sum0[n] += (int)va[0] * vb[n+32];
sum1[n] += (int)va[1] * vb[n+32];
sum2[n] += (int)va[2] * vb[n+32];
sum3[n] += (int)va[3] * vb[n+32];
sum4[n] += (int)va[4] * vb[n+32];
sum5[n] += (int)va[5] * vb[n+32];
sum6[n] += (int)va[6] * vb[n+32];
sum7[n] += (int)va[7] * vb[n+32];
va += 8;
sum0[n] += (int)va[0] * vb[n+40];
sum1[n] += (int)va[1] * vb[n+40];
sum2[n] += (int)va[2] * vb[n+40];
sum3[n] += (int)va[3] * vb[n+40];
sum4[n] += (int)va[4] * vb[n+40];
sum5[n] += (int)va[5] * vb[n+40];
sum6[n] += (int)va[6] * vb[n+40];
sum7[n] += (int)va[7] * vb[n+40];
va += 8;
sum0[n] += (int)va[0] * vb[n+48];
sum1[n] += (int)va[1] * vb[n+48];
sum2[n] += (int)va[2] * vb[n+48];
sum3[n] += (int)va[3] * vb[n+48];
sum4[n] += (int)va[4] * vb[n+48];
sum5[n] += (int)va[5] * vb[n+48];
sum6[n] += (int)va[6] * vb[n+48];
sum7[n] += (int)va[7] * vb[n+48];
va += 8;
sum0[n] += (int)va[0] * vb[n+56];
sum1[n] += (int)va[1] * vb[n+56];
sum2[n] += (int)va[2] * vb[n+56];
sum3[n] += (int)va[3] * vb[n+56];
sum4[n] += (int)va[4] * vb[n+56];
sum5[n] += (int)va[5] * vb[n+56];
sum6[n] += (int)va[6] * vb[n+56];
sum7[n] += (int)va[7] * vb[n+56];
va -= 56;
}
va += 64;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
sum4[n] += (int)va[4] * vb[n];
sum5[n] += (int)va[5] * vb[n];
sum6[n] += (int)va[6] * vb[n];
sum7[n] += (int)va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
output4[n] = sum4[n];
output5[n] = sum5[n];
output6[n] = sum6[n];
output7[n] = sum7[n];
}
#endif // __aarch64__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
for (; j<N; j++)
{
signed char* vb = bottom_tm.channel(j/8 + j%8);
const signed char* va = kernel_tm.channel(i/8);
#if __aarch64__
asm volatile(
"eor v14.16b, v14.16b, v14.16b \n" // sum0_3
"eor v15.16b, v15.16b, v15.16b \n" // sum4_7
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"eor v20.16b, v20.16b, v20.16b \n" // sum4
"eor v21.16b, v21.16b, v21.16b \n" // sum5
"eor v22.16b, v22.16b, v22.16b \n" // sum6
"eor v23.16b, v23.16b, v23.16b \n" // sum7
"lsr w4, %w20, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n" // k
//"prfm pldl1keep, [%8, #128] \n"
"ld1 {v4.8b}, [%8] \n" // d
"add %8, %8, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v1.8h, v1.8b, #0 \n" // k01 - k71
"sshll v2.8h, v2.8b, #0 \n" // k02 - k72
"sshll v3.8h, v3.8b, #0 \n" // k03 - k73
"sshll v4.8h, v4.8b, #0 \n" // a00 - a30
// k0
"smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k70) * a00
"smlal2 v17.4s, v0.8h, v4.h[0] \n"//
"smlal v18.4s, v1.4h, v4.h[1] \n"// sum1 += (k01-k71) * a10
"smlal2 v19.4s, v1.8h, v4.h[1] \n"//
"smlal v20.4s, v2.4h, v4.h[2] \n"// sum2 += (k02-k72) * a20
"smlal2 v21.4s, v2.8h, v4.h[2] \n"//
"smlal v22.4s, v3.4h, v4.h[3] \n"// sum3 += (k03-k73) * a30
"smlal2 v23.4s, v3.8h, v4.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"add v16.4s, v16.4s, v18.4s \n"
"add v17.4s, v17.4s, v19.4s \n"
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v14.4s, v16.4s, v20.4s \n"
"add v15.4s, v17.4s, v21.4s \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b}, [%9], #8 \n"
//"prfm pldl1keep, [%8, #128] \n"
"ld1 {v4.8b}, [%8] \n"
"add %8, %8, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v4.8h, v4.8b, #0 \n" // a00
// k0
"smlal v14.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k70) * a00
"smlal2 v15.4s, v0.8h, v4.h[0] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v14.s}[0], [%0] \n"
"st1 {v14.s}[1], [%1] \n"
"st1 {v14.s}[2], [%2] \n"
"st1 {v14.s}[3], [%3] \n"
"st1 {v15.s}[0], [%4] \n"
"st1 {v15.s}[1], [%5] \n"
"st1 {v15.s}[2], [%6] \n"
"st1 {v15.s}[3], [%7] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(output4), // %4
"=r"(output5), // %5
"=r"(output6), // %6
"=r"(output7), // %7
"=r"(vb), // %8
"=r"(va) // %9
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(output4),
"5"(output5),
"6"(output6),
"7"(output7),
"8"(vb),
"9"(va),
"r"(L) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int k=0; k<L; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
sum4 += (int)va[4] * vb[0];
sum5 += (int)va[5] * vb[0];
sum6 += (int)va[6] * vb[0];
sum7 += (int)va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __aarch64__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
#endif // __ARM_NEON && __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
int* output0 = top_blob.channel(i);
int* output1 = top_blob.channel(i+1);
int* output2 = top_blob.channel(i+2);
int* output3 = top_blob.channel(i+3);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4);
#else
const signed char* va = kernel_tm.channel(i/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"eor v18.16b, v18.16b, v18.16b \n" // sum1
"eor v19.16b, v19.16b, v19.16b \n" // sum1n
"eor v20.16b, v20.16b, v20.16b \n" // sum2
"eor v21.16b, v21.16b, v21.16b \n" // sum2n
"eor v22.16b, v22.16b, v22.16b \n" // sum3
"eor v23.16b, v23.16b, v23.16b \n" // sum3n
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b, v1.8b}, [%5], #16 \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%4], #32 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31
"sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
// k1
"smlal v16.4s, v9.4h, v0.h[4] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v0.h[4] \n"//
"smlal v18.4s, v9.4h, v0.h[5] \n"// sum1 += (a01-a71) * k11
"smlal2 v19.4s, v9.8h, v0.h[5] \n"//
"smlal v20.4s, v9.4h, v0.h[6] \n"// sum2 += (a01-a71) * k21
"smlal2 v21.4s, v9.8h, v0.h[6] \n"//
"smlal v22.4s, v9.4h, v0.h[7] \n"// sum3 += (a01-a71) * k31
"smlal2 v23.4s, v9.8h, v0.h[7] \n"//
// k2
"smlal v16.4s, v10.4h, v1.h[0] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v1.h[0] \n"//
"smlal v18.4s, v10.4h, v1.h[1] \n"// sum1 += (a02-a72) * k12
"smlal2 v19.4s, v10.8h, v1.h[1] \n"//
"smlal v20.4s, v10.4h, v1.h[2] \n"// sum2 += (a02-a72) * k22
"smlal2 v21.4s, v10.8h, v1.h[2] \n"//
"smlal v22.4s, v10.4h, v1.h[3] \n"// sum3 += (a02-a72) * k32
"smlal2 v23.4s, v10.8h, v1.h[3] \n"//
// k3
"smlal v16.4s, v11.4h, v1.h[4] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v1.h[4] \n"//
"smlal v18.4s, v11.4h, v1.h[5] \n"// sum1 += (a03-a73) * k13
"smlal2 v19.4s, v11.8h, v1.h[5] \n"//
"smlal v20.4s, v11.4h, v1.h[6] \n"// sum2 += (a03-a73) * k23
"smlal2 v21.4s, v11.8h, v1.h[6] \n"//
"smlal v22.4s, v11.4h, v1.h[7] \n"// sum3 += (a03-a73) * k33
"smlal2 v23.4s, v11.8h, v1.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b}, [%5] \n"
//"prfm pldl1keep, [%4, #128] \n"
"ld1 {v8.8b}, [%4], #8 \n"
"add %5, %5, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
"st1 {v18.4s, v19.4s}, [%1] \n"
"st1 {v20.4s, v21.4s}, [%2] \n"
"st1 {v22.4s, v23.4s}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
// K loop
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"vmov.s32 q14, #0 \n"
"vmov.s32 q15, #0 \n"
"lsr r4, %12, #3 \n"// r4 = nn = L >> 3
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d8-d11}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q7, d11 \n"// a30-a37
"vmovl.s8 q6, d10 \n"// a20-a27
"vmovl.s8 q5, d9 \n"// a10-a17
"vmovl.s8 q4, d8 \n"// a00-a07
"pld [%5, #128] \n"
"vld1.s8 {d0-d3}, [%5]! \n"// kptr k00-k30,k01-k31, k02-k32,k03-k33, k04-k34,k05-k35, k06-k36,k07-k37 k(outch)(inch)
"vmovl.s8 q3, d3 \n"// k06-k36,k07-k37
"vmovl.s8 q2, d2 \n"// k04-k34,k05-k35
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q8, d8, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q9, d9, d0[0] \n"
"vmlal.s16 q10, d8, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q11, d9, d0[1] \n"
"vmlal.s16 q12, d8, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q13, d9, d0[2] \n"
"vmlal.s16 q14, d8, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q15, d9, d0[3] \n"
"vmlal.s16 q8, d10, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q9, d11, d1[0] \n"
"vmlal.s16 q10, d10, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q11, d11, d1[1] \n"
"vmlal.s16 q12, d10, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q13, d11, d1[2] \n"
"vmlal.s16 q14, d10, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q15, d11, d1[3] \n"
"pld [%4, #128] \n"
"vld1.s8 {d8-d9}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d9 \n"// a10-a17
"vmovl.s8 q4, d8 \n"// a00-a07
"vmlal.s16 q8, d12, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q9, d13, d2[0] \n"
"vmlal.s16 q10, d12, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q11, d13, d2[1] \n"
"vmlal.s16 q12, d12, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q13, d13, d2[2] \n"
"vmlal.s16 q14, d12, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q15, d13, d2[3] \n"
"vmlal.s16 q8, d14, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q9, d15, d3[0] \n"
"vmlal.s16 q10, d14, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q11, d15, d3[1] \n"
"vmlal.s16 q12, d14, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q13, d15, d3[2] \n"
"vmlal.s16 q14, d14, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q15, d15, d3[3] \n"
"pld [%4, #128] \n"
"vld1.s8 {d0-d1}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q1, d1 \n"// a10-a17
"vmovl.s8 q0, d0 \n"// a00-a07
"vmlal.s16 q8, d8, d4[0] \n"// sum0 += (a40-a47) * k04
"vmlal.s16 q9, d9, d4[0] \n"
"vmlal.s16 q10, d8, d4[1] \n"// sum1 += (a40-a47) * k14
"vmlal.s16 q11, d9, d4[1] \n"
"vmlal.s16 q12, d8, d4[2] \n"// sum2 += (a40-a47) * k24
"vmlal.s16 q13, d9, d4[2] \n"
"vmlal.s16 q14, d8, d4[3] \n"// sum3 += (a40-a47) * k34
"vmlal.s16 q15, d9, d4[3] \n"
"vmlal.s16 q8, d10, d5[0] \n"// sum0 += (a50-a57) * k05
"vmlal.s16 q9, d11, d5[0] \n"
"vmlal.s16 q10, d10, d5[1] \n"// sum1 += (a50-a57) * k15
"vmlal.s16 q11, d11, d5[1] \n"
"vmlal.s16 q12, d10, d5[2] \n"// sum2 += (a50-a57) * k25
"vmlal.s16 q13, d11, d5[2] \n"
"vmlal.s16 q14, d10, d5[3] \n"// sum3 += (a50-a57) * k35
"vmlal.s16 q15, d11, d5[3] \n"
"vmlal.s16 q8, d0, d6[0] \n"// sum0 += (a60-a67) * k06
"vmlal.s16 q9, d1, d6[0] \n"
"vmlal.s16 q10, d0, d6[1] \n"// sum1 += (a60-a67) * k16
"vmlal.s16 q11, d1, d6[1] \n"
"vmlal.s16 q12, d0, d6[2] \n"// sum2 += (a60-a67) * k26
"vmlal.s16 q13, d1, d6[2] \n"
"vmlal.s16 q14, d0, d6[3] \n"// sum3 += (a60-a67) * k36
"vmlal.s16 q15, d1, d6[3] \n"
"vmlal.s16 q8, d2, d7[0] \n"// sum0 += (a70-a77) * k07
"vmlal.s16 q9, d3, d7[0] \n"
"vmlal.s16 q10, d2, d7[1] \n"// sum1 += (a70-a77) * k17
"vmlal.s16 q11, d3, d7[1] \n"
"vmlal.s16 q12, d2, d7[2] \n"// sum2 += (a70-a77) * k27
"vmlal.s16 q13, d3, d7[2] \n"
"vmlal.s16 q14, d2, d7[3] \n"// sum3 += (a70-a77) * k37
"vmlal.s16 q15, d3, d7[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #7 \n"// r4 = remain = inch & 7
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a70 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q8, d2, d0[0] \n"// sum0 += (a00-a70) * k00
"vmlal.s16 q9, d3, d0[0] \n"
"vmlal.s16 q10, d2, d0[1] \n"// sum1 += (a00-a70) * k10
"vmlal.s16 q11, d3, d0[1] \n"
"vmlal.s16 q12, d2, d0[2] \n"// sum2 += (a00-a70) * k20
"vmlal.s16 q13, d3, d0[2] \n"
"vmlal.s16 q14, d2, d0[3] \n"// sum3 += (a00-a70) * k30
"vmlal.s16 q15, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d16-d19}, [%0] \n"
"vst1.s32 {d20-d23}, [%1] \n"
"vst1.s32 {d24-d27}, [%2] \n"
"vst1.s32 {d28-d31}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
int sum0[8] = {0};
int sum1[8] = {0};
int sum2[8] = {0};
int sum3[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
va += 4;
sum0[n] += (int)va[0] * vb[n+8];
sum1[n] += (int)va[1] * vb[n+8];
sum2[n] += (int)va[2] * vb[n+8];
sum3[n] += (int)va[3] * vb[n+8];
va += 4;
sum0[n] += (int)va[0] * vb[n+16];
sum1[n] += (int)va[1] * vb[n+16];
sum2[n] += (int)va[2] * vb[n+16];
sum3[n] += (int)va[3] * vb[n+16];
va += 4;
sum0[n] += (int)va[0] * vb[n+24];
sum1[n] += (int)va[1] * vb[n+24];
sum2[n] += (int)va[2] * vb[n+24];
sum3[n] += (int)va[3] * vb[n+24];
va += 4;
sum0[n] += (int)va[0] * vb[n+32];
sum1[n] += (int)va[1] * vb[n+32];
sum2[n] += (int)va[2] * vb[n+32];
sum3[n] += (int)va[3] * vb[n+32];
va += 4;
sum0[n] += (int)va[0] * vb[n+40];
sum1[n] += (int)va[1] * vb[n+40];
sum2[n] += (int)va[2] * vb[n+40];
sum3[n] += (int)va[3] * vb[n+40];
va += 4;
sum0[n] += (int)va[0] * vb[n+48];
sum1[n] += (int)va[1] * vb[n+48];
sum2[n] += (int)va[2] * vb[n+48];
sum3[n] += (int)va[3] * vb[n+48];
va += 4;
sum0[n] += (int)va[0] * vb[n+56];
sum1[n] += (int)va[1] * vb[n+56];
sum2[n] += (int)va[2] * vb[n+56];
sum3[n] += (int)va[3] * vb[n+56];
va -= 28;
}
va += 32;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __ARM_NEON
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
for (; j<N; j++)
{
signed char* vb = bottom_tm.channel(j/8 + j%8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4);
#else
const signed char* va = kernel_tm.channel(i/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v14.16b, v14.16b, v14.16b \n" // sum0_3
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b, v1.8b}, [%5], #16 \n" // k
//"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.8b}, [%4] \n" // d
"add %4, %4, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31
"sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33
"sshll v4.8h, v4.8b, #0 \n" // a00 - a30
"subs w4, w4, #1 \n"
// k0
"smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k30) * a00
"smlal2 v17.4s, v0.8h, v4.h[0] \n"// sum1 += (k01-k31) * a10
"smlal v18.4s, v1.4h, v4.h[1] \n"// sum2 += (k02-k32) * a20
"smlal2 v19.4s, v1.8h, v4.h[1] \n"// sum3 += (k03-k33) * a30
"bne 0b \n"
"add v16.4s, v16.4s, v18.4s \n"
"add v17.4s, v17.4s, v19.4s \n"
"add v14.4s, v16.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b}, [%5] \n"
//"prfm pldl1keep, [4, #128] \n"
"ld1 {v4.8b}, [%4] \n"
"add %4, %4, #1 \n"
"add %5, %5, #4 \n"
"subs w4, w4, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v4.8h, v4.8b, #0 \n" // a00
// k0
"smlal v14.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k30) * a00
"bne 2b \n"
"3: \n"
"st1 {v14.s}[0], [%0] \n"
"st1 {v14.s}[1], [%1] \n"
"st1 {v14.s}[2], [%2] \n"
"st1 {v14.s}[3], [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"vmov.s32 q14, #0 \n"
"lsr r4, %12, #3 \n"// r4 = nn = L >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d0}, [%4]! \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"vmovl.s8 q0, d0 \n"// a00-a07
"pld [%5, #128] \n"
"vld1.s8 {d2-d5}, [%5]! \n"// kptr k00-k30,k01-k31, k02-k32,k03-k33, k04-k34,k05-k35, k06-k36,k07-k37 k(outch)(inch)
"vmovl.s8 q4, d5 \n"// k06-k36,k07-k37
"vmovl.s8 q3, d4 \n"// k04-k34,k05-k35
"vmovl.s8 q2, d3 \n"// k02-k32,k03-k33
"vmovl.s8 q1, d2 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d2, d0[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d3, d0[1] \n"// (k01-k31) * a01
"vmlal.s16 q8, d4, d0[2] \n"// (k02-k32) * a02
"vmlal.s16 q9, d5, d0[3] \n"// (k03-k33) * a03
"vmlal.s16 q10, d6, d1[0] \n"// (k04-k34) * a04
"vmlal.s16 q11, d7, d1[1] \n"// (k05-k35) * a05
"vmlal.s16 q12, d8, d1[2] \n"// (k06-k36) * a06
"vmlal.s16 q13, d9, d1[3] \n"// (k07-k37) * a07
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q11, q11, q10 \n"
"vadd.s32 q13, q13, q12 \n"
"vadd.s32 q9, q9, q6 \n"
"vadd.s32 q13, q13, q11 \n"
"vadd.s32 q14, q13, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #7 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q14, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d28[0]}, [%0] \n"
"vst1.s32 {d28[1]}, [%1] \n"
"vst1.s32 {d29[0]}, [%2] \n"
"vst1.s32 {d29[1]}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
);
#endif // __aarch64__
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int k=0; k<L; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __ARM_NEON
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_outch_start; i<outch; i++)
{
int* output = top_blob.channel(i);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
#else
const signed char* va = kernel_tm.channel(i/4 + i%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"lsr w4, %w6, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8b}, [%2] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%1], #32 \n"
"add %2, %2, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k03
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
// k1
"smlal v16.4s, v9.4h, v0.h[1] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v0.h[1] \n"//
// k2
"smlal v16.4s, v10.4h, v0.h[2] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v0.h[2] \n"//
// k3
"smlal v16.4s, v11.4h, v0.h[3] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v0.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8b}, [%2] \n"
//"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.8b}, [%1], #8 \n"
"add %2, %2, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
: "=r"(output), // %0
"=r"(vb), // %1
"=r"(va) // %2
: "0"(output),
"1"(vb),
"2"(va),
"r"(L) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"
);
#else
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #3 \n"// r4 = nn = inch >> 3
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"pld [%2, #128] \n"
"vld1.s8 {d0}, [%2]! \n"// kptr k00-k07 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k04,k05,k06,k07
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a40-a47,a50-a57,a60-a67,a70-a77 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a70-a77
"vmovl.s8 q4, d6 \n"// a60-a67
"vmovl.s8 q3, d5 \n"// a50-a57
"vmovl.s8 q2, d4 \n"// a40-a47
"vmlal.s16 q6, d4, d1[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d1[0] \n"
"vmlal.s16 q6, d6, d1[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d1[1] \n"
"vmlal.s16 q6, d8, d1[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d1[2] \n"
"vmlal.s16 q6, d10, d1[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d1[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #7 \n"// r4 = remain = inch & 7
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0] \n"
: "=r"(output), // %0
"=r"(vb), // %1
"=r"(va) // %2
: "0"(output),
"1"(vb),
"2"(va),
"r"(L) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#endif // __aarch64__
#else
int sum[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum[n] += (int)va[0] * vb[n];
sum[n] += (int)va[1] * vb[n+8];
sum[n] += (int)va[2] * vb[n+16];
sum[n] += (int)va[3] * vb[n+24];
sum[n] += (int)va[4] * vb[n+32];
sum[n] += (int)va[5] * vb[n+40];
sum[n] += (int)va[6] * vb[n+48];
sum[n] += (int)va[7] * vb[n+56];
}
va += 8;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum[n] += (int)va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n=0; n<8; n++)
{
output[n] = sum[n];
}
#endif // __ARM_NEON
output += 8;
}
for (; j<N; j++)
{
int sum = 0;
signed char* vb = bottom_tm.channel(j/8 + j%8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
#else
const signed char* va = kernel_tm.channel(i/4 + i%4);
#endif // __ARM_NEON && __aarch64__
for (int k=0; k<L; k++)
{
sum += (int)va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum;
output++;
}
}
}
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  // Underlying id -> node storage; populated by the match infrastructure
  // (via BoundNodesTreeBuilder) before the user callback runs.
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
using AttrMatcher = internal::Matcher<Attr>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  // TrueMatcher succeeds on every node, of any type.
  return internal::TrueMatcher();
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Use SM rather than shadowing the SourceManager type name.
  auto &SM = Finder->getASTContext().getSourceManager();
  // Map the node's begin location through macro expansions before asking
  // which file it belongs to.
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  // Resolve through macro expansions first; an invalid location can never
  // be attributed to a system header.
  auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid())
    return false;
  return SM.isInSystemHeader(Loc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid())
    return false;
  // No file entry (e.g. built-in or scratch buffer): nothing to match.
  auto Entry = SM.getFileEntryForID(SM.getFileID(Loc));
  if (!Entry)
    return false;
  // The regex only needs to match part of the file name.
  return RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, MacroName) {
  // The node's beginning and end must both be expanded from the very same
  // instance of the named macro; otherwise only part of it comes from it.
  auto &Ctx = Finder->getASTContext();
  llvm::Optional<SourceLocation> Begin =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  if (!Begin)
    return false;
  llvm::Optional<SourceLocation> End =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return End && *Begin == *End;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches decomposition-declarations.
///
/// Examples matches the declaration node with \c foo and \c bar, but not
/// \c number.
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
/// int number = 42;
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
decompositionDecl;
/// Matches binding declarations
/// Example matches \c foo and \c bar
/// (matcher = bindingDecl())
///
/// \code
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
bindingDecl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches class bases.
///
/// Examples matches \c public virtual B.
/// \code
/// class B {};
/// class C : public virtual B {};
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches template template parameter declarations.
///
/// Given
/// \code
/// template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
/// matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
TemplateTemplateParmDecl>
templateTemplateParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // getAccessSpecifier covers both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_public;
}
/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // getAccessSpecifier covers both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_protected;
}
/// Matches private C++ declarations and C++ base specifiers that specify
/// private
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // getAccessSpecifier covers both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Thin forward to FieldDecl::isBitField().
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Only bit-fields have a meaningful width; guard before evaluating it.
  if (!Node.isBitField())
    return false;
  // The width expression is evaluated in the current ASTContext.
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Thin forward to FunctionDecl::isMain().
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // The template this specialization was instantiated/specialized from.
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (Specialized == nullptr)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches an entity that has been implicitly added by the compiler (e.g.
/// implicit default/copy constructors).
AST_POLYMORPHIC_MATCHER(isImplicit,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Attr)) {
  // Forwards to the node's isImplicit() accessor (available on both Decl
  // and Attr): true for nodes synthesized by the compiler rather than
  // written in the source.
  return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // matchesFirstInRange returns the end iterator when no element matched.
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder) != Args.end();
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreUnlessSpelledInSource,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap the inner matcher so everything nested under it is matched with
  // traversal kind TK, keeping the inner matcher's restrict kind.
  auto *Wrapped = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             Wrapped, InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  // Same wrapping as the Matcher<T> overload, but the result stays bindable
  // so callers can still attach .bind("id").
  auto *Wrapped = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          Wrapped, InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
/// \c traverse overload for variadic-operator matchers (e.g. \c allOf,
/// \c anyOf) that are not yet committed to a specific node type; the
/// traversal kind is carried alongside until conversion.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}
/// \c traverse overload for argument-adapting matcher adaptors (e.g. the
/// form produced by \c has / \c hasDescendant before type conversion);
/// wraps the adaptor together with the traversal kind.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}
/// \c traverse overload for polymorphic matchers, which can later convert
/// to several node types; the traversal kind is attached before conversion.
template <template <typename T, typename... P> class MatcherT, typename... P,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>
traverse(TraversalKind TK,
         const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>
             &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK,
                                                                  InnerMatcher);
}
/// \c traverse overload for \c mapAnyOf helpers; delegates to the plain
/// matcher overload via \c with().
template <typename... T>
internal::Matcher<typename internal::GetClade<T...>::Type>
traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) {
  return traverse(TK, InnerMatcher.with());
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip implicit AST nodes (but not parentheses or explicit casts)
  // before handing the expression to the inner matcher.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for a.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts only; parentheses and explicit casts remain.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and all casts (implicit and explicit).
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts; explicit casts remain.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Strip any ParenType sugar from the type before matching.
  QualType Unparenthesized = Node.IgnoreParens();
  return InnerMatcher.matches(Unparenthesized, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Strip ParenExpr nodes only; casts and implicit nodes remain.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Thin forward to Expr::isInstantiationDependent().
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
// Thin forward to Expr::isTypeDependent().
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
// Thin forward to Expr::isValueDependent().
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // An out-of-range index means there is no N'th argument to match.
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the length of the specialization's argument list against N.
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-kind arguments carry a QualType to match against.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-kind arguments carry a TemplateName to match against.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Only declaration-kind arguments carry a Decl to match against.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression-kind arguments carry an Expr to match against.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(isIntegral()))
///   matches the implicit instantiation of C in C<42>
///   with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  const TemplateArgument::ArgKind Kind = Node.getKind();
  return Kind == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Non-integral arguments have no integral type to inspect.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the canonical base-10 rendering of the APSInt value.
  return toString(Node.getAsIntegral(), 10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists have no syntactic form and never match.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using-enum declarations.
///
/// Given
/// \code
/// namespace X { enum x {...}; }
/// using enum X::x;
/// \endcode
/// usingEnumDecl()
/// matches \code using enum X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl>
usingEnumDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new/delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
/// See also the binaryOperation() matcher for more-general matching of binary
/// uses of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches rewritten binary operators
///
/// Example matches use of "<":
/// \code
/// #include <compare>
/// struct HasSpaceshipMem {
/// int a;
/// constexpr auto operator<=>(const HasSpaceshipMem&) const = default;
/// };
/// void compare() {
/// HasSpaceshipMem hs1, hs2;
/// if (hs1 < hs2)
/// return;
/// }
/// \endcode
/// See also the binaryOperation() matcher for more-general matching
/// of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXRewrittenBinaryOperator>
cxxRewrittenBinaryOperator;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
///   forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///   for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its increment entirely, e.g. 'for (;;)'.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
///   forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///   for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement is optional, e.g. 'for (; i < N; ++i)'.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for statement.
/// (NOTE(review): previous doc said "initialization statement of a for loop"
/// and used forStmt in the example — copy/paste from hasLoopInit; this
/// matcher applies to CXXForRangeStmt.)
///
/// Example:
///   cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range initialization expression of a range-based for
/// statement.
///
/// Example:
///   cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches co_return statements.
///
/// Given
/// \code
/// while (true) { co_return; }
/// \endcode
/// coreturnStmt()
/// matches 'co_return'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt>
coreturnStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches co_await expressions.
///
/// Given
/// \code
/// co_await 1;
/// \endcode
/// coawaitExpr()
/// matches 'co_await 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
coawaitExpr;
/// Matches co_await expressions where the type of the promise is dependent
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
dependentCoawaitExpr;
/// Matches co_yield expressions.
///
/// Given
/// \code
/// co_yield 1;
/// \endcode
/// coyieldExpr()
/// matches 'co_yield 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
coyieldExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches C11 _Generic expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
genericSelectionExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
/// See also the binaryOperation() matcher for more-general matching.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() is the number of designators in the
  // initializer, so this is a direct count comparison.
  return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main
/// matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches any of the \p NodeMatchers with InnerMatchers nested within
///
/// Given
/// \code
/// if (true);
/// for (; true; );
/// \endcode
/// with the matcher
/// \code
/// mapAnyOf(ifStmt, forStmt).with(
/// hasCondition(cxxBoolLiteralExpr(equals(true)))
/// ).bind("trueCond")
/// \endcode
/// matches the \c if and the \c for. It is equivalent to:
/// \code
/// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true)));
/// anyOf(
/// ifStmt(trueCond).bind("trueCond"),
/// forStmt(trueCond).bind("trueCond")
/// );
/// \endcode
///
/// The with() chain-call accepts zero or more matchers which are combined
/// as-if with allOf() in each of the node matchers.
/// Usable as: Any Matcher
template <typename T, typename... U>
auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) {
  // The node-matcher arguments are used only to carry their node types U...
  // into the template; their runtime values are ignored. The returned helper
  // builds the actual matcher once .with(...) / .bind(...) is invoked.
  return internal::MapAnyOfHelper<U...>();
}
/// Matches nodes which can be used with binary operators.
///
/// The code
/// \code
/// var1 != var2;
/// \endcode
/// might be represented in the clang AST as a binaryOperator, a
/// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on
///
/// * whether the types of var1 and var2 are fundamental (binaryOperator) or at
/// least one is a class type (cxxOperatorCallExpr)
/// * whether the code appears in a template declaration, if at least one of the
/// vars is a dependent-type (binaryOperator)
/// * whether the code relies on a rewritten binary operator, such as a
/// spaceship operator or an inverted equality operator
/// (cxxRewrittenBinaryOperator)
///
/// This matcher elides details in places where the matchers for the nodes are
/// compatible.
///
/// Given
/// \code
/// binaryOperation(
/// hasOperatorName("!="),
/// hasLHS(expr().bind("lhs")),
/// hasRHS(expr().bind("rhs"))
/// )
/// \endcode
/// matches each use of "!=" in:
/// \code
/// struct S{
/// bool operator!=(const S&) const;
/// };
///
/// void foo()
/// {
/// 1 != 2;
/// S() != S();
/// }
///
/// template<typename T>
/// void templ()
/// {
/// 1 != 2;
/// T() != S();
/// }
/// struct HasOpEq
/// {
/// bool operator==(const HasOpEq &) const;
/// };
///
/// void inverse()
/// {
/// HasOpEq s1;
/// HasOpEq s2;
/// if (s1 != s2)
/// return;
/// }
///
/// struct HasSpaceship
/// {
/// bool operator<=>(const HasOpEq &) const;
/// };
///
/// void use_spaceship()
/// {
/// HasSpaceship s1;
/// HasSpaceship s2;
/// if (s1 != s2)
/// return;
/// }
/// \endcode
extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator>
binaryOperation;
/// Matches function calls and constructor calls
///
/// Because CallExpr and CXXConstructExpr do not share a common
/// base class with API accessing arguments etc, AST Matchers for code
/// which should match both are typically duplicated. This matcher
/// removes the need for duplication.
///
/// Given code
/// \code
/// struct ConstructorTakesInt
/// {
/// ConstructorTakesInt(int i) {}
/// };
///
/// void callTakesInt(int i)
/// {
/// }
///
/// void doCall()
/// {
/// callTakesInt(42);
/// }
///
/// void doConstruct()
/// {
/// ConstructorTakesInt cti(42);
/// }
/// \endcode
///
/// The matcher
/// \code
/// invocation(hasArgument(0, integerLiteral(equals(42))))
/// \endcode
/// matches the expression in both doCall and doConstruct
extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int"))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // getTypeOfArgument() yields the operand type for both spellings of the
  // trait: sizeof(expr) and sizeof(Type).
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Direct comparison against the trait kind (UETT_SizeOf, UETT_AlignOf, ...).
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both the C++11 'alignof' kind and the GNU preferred-alignment
  // ('__alignof__') kind, then apply the caller-provided matcher on top.
  const auto AlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict the generic trait matcher to the UETT_SizeOf kind before
  // applying the caller-provided matcher.
  const auto SizeOfKind = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(SizeOfKind, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher accepts a list of candidate names; wrap the single
  // requested name in a one-element vector.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prefix the qualified name with "::" so patterns anchored at the global
  // scope (e.g. "::X") can match.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcher<
    internal::HasOverloadedOperatorNameMatcher,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
    std::vector<std::string>>
hasOverloadedOperatorName(StringRef Name) {
  // Name the (verbose) polymorphic matcher type once and construct it with a
  // single-element list containing the requested operator spelling.
  using ResultT = internal::PolymorphicMatcher<
      internal::HasOverloadedOperatorNameMatcher,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
      std::vector<std::string>>;
  return ResultT({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXOperatorCallExpr, FunctionDecl),
std::vector<std::string>>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the known name of members.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  // Dependent members have no associated declaration, so compare the spelled
  // (unresolved) member name as a string.
  return Node.getMember().getAsString() == N;
}
/// Matches template-dependent, but known, member names against an already-bound
/// node
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the name of already-bound VarDecl, FieldDecl
/// and CXXMethodDecl nodes.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// The matcher
/// @code
/// \c cxxDependentScopeMemberExpr(
/// hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
/// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
/// cxxMethodDecl(hasName("mem")).bind("templMem")
/// )))))
/// )))),
/// memberHasSameNameAsBoundNode("templMem")
/// )
/// @endcode
/// first matches and binds the @c mem member of the @c S template, then
/// compares its name to the usage in @c s.mem() in the @c x function template
AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode,
              std::string, BindingID) {
  auto MemberName = Node.getMember().getAsString();
  // Drop every binding set whose node bound to BindingID does not carry the
  // same name as this member expression; the overall match succeeds only if
  // at least one binding set survives. The predicate returns true to REMOVE
  // a binding set.
  return Builder->removeBindings(
      [this, MemberName](const BoundNodesMap &Nodes) {
        const auto &BN = Nodes.getNode(this->BindingID);
        if (const auto *ND = BN.get<NamedDecl>()) {
          // Only fields, methods and variables are meaningful member
          // candidates; discard bindings to any other NamedDecl.
          if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND))
            return true;
          return ND->getName() != MemberName;
        }
        // Bound node is not a NamedDecl at all: remove it.
        return true;
      });
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  // Directly=false requests the transitive (direct or indirect) check.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  const auto InnerMatcher = isDerivedFrom(hasName(BaseName));
  // Dispatch on the concrete node kind: C++ record or Objective-C interface.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(InnerMatcher).matches(*Record, Finder,
                                                        Builder);
  return Matcher<ObjCInterfaceDecl>(InnerMatcher)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Delegates the direct-and-transitive base walk to the shared helper.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Only a class with a visible definition has a base-specifier list.
  if (!Node.hasDefinition())
    return false;
  // Return on the first direct base accepted by the inner matcher, mirroring
  // the short-circuit behavior of llvm::any_of.
  for (const CXXBaseSpecifier &Base : Node.bases())
    if (BaseSpecMatcher.matches(Base, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Accept either the class itself (Base) or any transitively derived class.
  const auto M = anyOf(Base, isDerivedFrom(Base));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  // Not a C++ record, so it must be an Objective-C interface.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a class.
  if (BaseName.empty())
    return false;
  const auto InnerMatcher = isSameOrDerivedFrom(hasName(BaseName));
  // Dispatch on the concrete node kind: C++ record or Objective-C interface.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(InnerMatcher).matches(*Record, Finder,
                                                        Builder);
  return Matcher<ObjCInterfaceDecl>(InnerMatcher)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  // Directly=true restricts the check to immediate base classes only.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  const auto InnerMatcher = isDirectlyDerivedFrom(hasName(BaseName));
  // Dispatch on the concrete node kind: C++ record or Objective-C interface.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(InnerMatcher).matches(*Record, Finder,
                                                        Builder);
  return Matcher<ObjCInterfaceDecl>(InnerMatcher)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Work on a copy of the bindings so that a failed match (or a rejected
  // implicit method) leaves the caller's Builder untouched.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;
  // In implicit-ignoring traversal modes, compiler-generated methods do not
  // count as matches.
  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  // Commit the bindings produced by the successful match.
  *Builder = std::move(Result);
  return true;
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Forwards to CXXRecordDecl::isLambda(): true for lambda closure types.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // A node yields a result when it matches itself or when any descendant
  // matches; eachOf reports every such occurrence, not just the first.
  auto DescendantMatches = forEachDescendant(Matcher);
  return eachOf(Matcher, DescendantMatches);
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcher<
    internal::HasDeclarationMatcher,
    void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Name the verbose polymorphic matcher type once rather than spelling it
  // out a second time in the return statement.
  using ResultT = internal::PolymorphicMatcher<
      internal::HasDeclarationMatcher,
      void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>;
  return ResultT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Match the underlying declaration when there is one; otherwise fail.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fetch the implicit object argument and guard against null BEFORE
  // stripping parens/implicit casts. The previous code called
  // ->IgnoreParenImpCasts() on the raw pointer and only null-checked the
  // result, so a null implicit object argument would have been dereferenced.
  const Expr *RawArg = Node.getImplicitObjectArgument();
  if (RawArg == nullptr)
    return false;
  // IgnoreParenImpCasts peels parentheses and implicit casts off the
  // argument before handing it to the inner matcher.
  const Expr *ExprNode = RawArg->IgnoreParenImpCasts();
  return InnerMatcher.matches(*ExprNode, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // The receiver's type as recorded on the message expression. (Renamed
  // from the misleading 'TypeDecl' — this is a QualType, not a Decl.)
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Forwards directly to ObjCMethodDecl::isClassMethod().
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Forwards directly to ObjCMethodDecl::isInstanceMethod().
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Forwards directly to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Forwards directly to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only instance messages have a receiver expression; class messages
  // yield a null instance receiver and therefore never match.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Compare the selector's textual form to the expected name; operator==
  // is equivalent to compare(...) == 0 but reads more directly.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
// Declaration only; the variadic matcher object is defined in the matcher
// library (note the 'extern').
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  // Run the regular expression over the selector's textual form.
  return RegExp->match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // Forwards to Selector::isNull() on the message's selector.
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Forwards to Selector::isUnarySelector() on the message's selector.
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Forwards to Selector::isKeywordSelector() on the message's selector.
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compares the selector's argument count against the expected N.
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Match the callee expression when present; a null callee never matches.
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegates to hasDeclaration, which resolves the callee's declaration.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// asString("class X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl, CXXBaseSpecifier),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // Resolve the node's type; a null QualType never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() &&
         InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// cxxRecordDecl(hasName("X"))))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type, then match its declaration via hasDeclaration.
  const QualType Underlying = internal::getUnderlyingType(Node);
  if (Underlying.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher))
      .matches(Underlying, Finder, Builder);
}
/// Matches if the type location of a node matches the inner matcher.
///
/// Examples:
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
///
/// \code
/// auto x = int(3);
/// \endcode
/// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int"))))
/// matches int(3)
///
/// \code
/// struct Foo { Foo(int, int); };
/// auto x = Foo(1, 2);
/// \endcode
/// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo"))))
/// matches Foo(1, 2)
///
/// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>,
/// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>,
/// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>,
/// Matcher<CXXUnresolvedConstructExpr>,
/// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>,
/// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>,
/// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>,
/// Matcher<TypedefNameDecl>
AST_POLYMORPHIC_MATCHER_P(
    hasTypeLoc,
    AST_POLYMORPHIC_SUPPORTED_TYPES(
        BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr,
        CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr,
        ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl,
        ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc,
        TypedefNameDecl),
    internal::Matcher<TypeLoc>, Inner) {
  // Implicit nodes (e.g. implicit destructors) carry no type source info,
  // so they can never match.
  TypeSourceInfo *TSI = internal::GetTypeSourceInfo(Node);
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compares against QualType::getAsString(), i.e. the printed spelling.
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Null types and non-pointer types never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wraps the decl matcher in hasDeclaration and reuses the QualType overload.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // The result is dereferenced unconditionally; this code assumes
  // getUnqualifiedDesugaredType() never returns null.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null types and non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType never matches; otherwise match the canonical type.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wraps the decl matcher in hasDeclaration and reuses the QualType overload.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, the argument is matched as-is — no paren or implicit-cast
  // stripping is performed.
  if (const Expr *Arg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*Arg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either a direct type match or a pointer whose pointee matches.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same shape as the QualType overload; the Decl matcher is accepted by
  // the hasType overloads directly.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match the referenced declaration when present; null never matches.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // The reference only matches if name lookup found the declaration via a
  // using-shadow declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // matchesFirstInPointerRange returns decls_end() when nothing in the
  // overload set matched, so comparing against the end sentinel yields the
  // "any declaration matched" result.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder,
                                    Builder) != Node.decls_end();
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring more than one entity never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // Match the initializer expression when one exists; otherwise fail.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Forwards directly to VarDecl::isStaticLocal().
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Forwards directly to VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Forwards directly to VarDecl::hasGlobalStorage().
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compares the declaration's storage duration against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Forwards directly to VarDecl::isExceptionVariable().
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned NumArgs = Node.getNumArgs();
  // In the default traversal mode, count every argument, defaulted or not.
  if (!Finder->isTraversalIgnoringImplicitNodes())
    return NumArgs == N;
  // In implicit-ignoring mode, strip trailing CXXDefaultArgExpr arguments
  // before comparing, so only explicitly written arguments are counted.
  while (NumArgs) {
    if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
      break;
    --NumArgs;
  }
  return NumArgs == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr,
                               CXXUnresolvedConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  const unsigned ArgCount = Node.getNumArgs();
  if (N >= ArgCount)
    return false;
  const Expr *Argument = Node.getArg(N);
  // In implicit-ignoring traversal mode, defaulted arguments don't count.
  const bool SkipDefaulted = Finder->isTraversalIgnoringImplicitNodes();
  if (SkipDefaulted && isa<CXXDefaultArgExpr>(Argument))
    return false;
  return InnerMatcher.matches(*Argument->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations in the statement and compare against N.
  const ptrdiff_t Count = std::distance(Node.decl_begin(), Node.decl_end());
  return Count == static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Out-of-range indices never match.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  // Step to the N'th declaration and hand it to the inner matcher.
  DeclStmt::const_decl_iterator It = std::next(Node.decl_begin(), N);
  return InnerMatcher.matches(**It, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch(...) has no exception declaration, so a null decl means catch-all.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Find the first initializer the inner matcher accepts; the sentinel
  // init_end() means none matched.
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                            Node.init_end(), Finder, Builder);
  if (MatchIt == Node.init_end())
    return false;
  // In implicit-ignoring traversal mode, only explicitly written
  // initializers count as matches.
  return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes();
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Match the initialized member when there is one; base initializers
  // (null member) never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the initializer expression when present; null never matches.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // Forwards directly to CXXCtorInitializer::isWritten().
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // Forwards directly to CXXCtorInitializer::isBaseInitializer().
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Forwards directly to CXXCtorInitializer::isMemberInitializer().
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // In implicit-ignoring traversal mode, stop at the first defaulted
    // argument — defaulted arguments (and those after) don't count.
    if (Finder->isTraversalIgnoringImplicitNodes() &&
        isa<CXXDefaultArgExpr>(Arg))
      break;
    // Match against a copy of the current bindings so a failed attempt
    // leaves Builder untouched; commit the copy only on success.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    // Only variable captures are considered; 'this' captures are handled
    // by the other overload.
    if (Capture.capturesVariable()) {
      // Match against a copy of the bindings so a failed attempt leaves
      // Builder untouched; commit the copy only on success.
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // A 'this' capture carries no expression node to hand to the inner
  // matcher; the mere presence of such a capture is what this overload
  // reports.
  for (const LambdaCapture &Capture : Node.captures())
    if (Capture.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // Forward the query to the construct expression itself.
  const bool UsesBracedInit = Node.isListInitialization();
  return UsesBracedInit;
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // Forward the query to the construct expression itself.
  const bool NeedsZeroInit = Node.requiresZeroInitialization();
  return NeedsZeroInit;
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Guard clause: an out-of-range index can never match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates the bindings of every matching (argument, parameter) pair;
  // all of them are committed to the caller's builder together at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex intentionally starts at 0 even when ArgIndex starts at 1:
  // the implicit object argument has no corresponding ParmVarDecl.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    // Parens and casts around the argument are transparent to ArgMatcher.
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match on the whole call/construct node to locate the callee's
      // ParamIndex-th parameter and run ParamMatcher against it.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParamType(
/// declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
/// matches f(y) and f_ptr(y)
/// with declRefExpr(...)
/// matching int y
/// and qualType(...)
/// matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  // Accumulates bindings from every matching (argument, parameter-type)
  // pair; they are committed to the caller's builder together at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // Try to recover a prototype so parameter types are available even for
  // calls through (member) function pointers.
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  unsigned NumArgs = Node.getNumArgs();
  // Variadic trailing arguments have no declared parameter type, so only
  // iterate over the prototype's fixed parameters in that case.
  if (FProto && FProto->isVariadic())
    NumArgs = std::min(NumArgs, FProto->getNumParams());
  for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    // Parens and casts around the argument are transparent to ArgMatcher.
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      // Fall back to locating the parameter through the callee declaration.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  // The parameter's owner can be a function, a block, or an ObjC method;
  // each stores parameters separately, so probe all three in turn.
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  // Context was null or of an unrecognized kind — no match.
  return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // matchesFirstInPointerRange returns the end iterator when no element of
  // [param_begin, param_end) satisfies InnerMatcher.
  const auto MatchIt = matchesFirstInPointerRange(
      InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder);
  return MatchIt != Node.param_end();
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Both supported node types expose getNumParams().
  const unsigned ParamCount = Node.getNumParams();
  return ParamCount == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the inner matcher directly on the declared return type.
  const QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Both FunctionDecl and VarDecl provide isExternC().
  const bool HasCLinkage = Node.isExternC();
  return HasCLinkage;
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // SC_Static is only recorded when "static" is written in the source.
  const StorageClass SC = Node.getStorageClass();
  return SC == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // Forward to the declaration's own "= delete" query.
  const bool Deleted = Node.isDeleted();
  return Deleted;
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // Forward to the declaration's own "= default" query.
  const bool Defaulted = Node.isDefaulted();
  return Defaulted;
}
/// Matches weak function declarations.
///
/// Given:
/// \code
/// void foo() __attribute__((__weakref__("__foo")));
/// void bar();
/// \endcode
/// functionDecl(isWeak())
/// matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); }
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Only functions with a prototype can carry an exception specification.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  if (FnTy == nullptr)
    return false;
  return FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // A function without a prototype carries no exception specification, so it
  // is treated as potentially throwing.
  if (FnTy == nullptr)
    return false;
  // Assume the best for any unresolved exception specification; otherwise
  // defer to the prototype's own nothrow computation.
  return isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()) ||
         FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // All three supported node types expose isConstexpr().
  const bool IsConstexpr = Node.isConstexpr();
  return IsConstexpr;
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init-statement report a null initializer.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A for-statement may legally lack a condition: for (;;) {}.
  if (const Expr *Condition = Node.getCond())
    return InnerMatcher.matches(*Condition, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Guard against a null then-branch before running the inner matcher.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // An if-statement without an else-branch reports null here.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Rather than comparing directly, prune every binding set in which the
  // node bound to ID differs from the current node; the match succeeds when
  // at least one binding set survives.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only 'if (T v = ...)' style conditions carry a condition-variable
  // DeclStmt; plain expression conditions yield null here.
  if (const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // getIdx() is the bracketed subscript expression.
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // getBase() is the expression being subscripted.
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
///
/// Given
/// \code
/// void f();
/// void f() {}
/// \endcode
/// hasBody(functionDecl())
/// matches 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // In traversal modes that ignore implicit nodes, defaulted functions have
  // no user-written body, so they never match.
  if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
    return false;
  // GetBodyMatcher dispatches per node type; for a FunctionDecl the body is
  // only present on the defining declaration (null otherwise).
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
/// void f();
/// void f() {}
/// void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
/// matches both 'void f();'
/// and 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  // getBody() finds the body across redeclarations, so every declaration of
  // a defined function matches, not just the defining one.
  if (const Stmt *Body = Node.getBody())
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For a StmtExpr this unwraps to its child CompoundStmt; may be null.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  if (CS == nullptr)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                    CS->body_end(), Finder,
                                    Builder) != CS->body_end();
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() is the number of direct child statements.
  const unsigned ChildCount = Node.size();
  return ChildCount == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
                             void(internal::AllNodeBaseTypes), ValueT>
equals(const ValueT &Value) {
  // Alias the verbose matcher type once, then wrap the value in it.
  using ResultT =
      internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
                                   void(internal::AllNodeBaseTypes), ValueT>;
  return ResultT(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  // Compare the literal node's value against the supplied bool.
  internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  // Compare the literal node's value against the supplied unsigned.
  internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  // Compare the literal node's value against the supplied double; this
  // overload additionally supports floating-point literals.
  internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasOperatorName,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator, UnaryOperator),
    std::string, Name) {
  // getOpName may produce no spelling for this node; treat that as no match.
  Optional<StringRef> OpName = internal::getOpName(Node);
  return OpName && *OpName == Name;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Declared extern here; the single definition lives in another translation
// unit. The variadic wrapper turns a list of operator spellings into one
// polymorphic matcher.
extern const internal::VariadicFunction<
    internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher,
                                 AST_POLYMORPHIC_SUPPORTED_TYPES(
                                     BinaryOperator, CXXOperatorCallExpr,
                                     CXXRewrittenBinaryOperator, UnaryOperator),
                                 std::vector<std::string>>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isAssignmentOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  // All supported node types expose isAssignmentOp().
  const bool IsAssignment = Node.isAssignmentOp();
  return IsAssignment;
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isComparisonOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  // All supported node types expose isComparisonOp().
  const bool IsComparison = Node.isComparisonOp();
  return IsComparison;
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // internal::getLHS may yield null for nodes without a left operand.
  if (const Expr *Lhs = internal::getLHS(Node))
    return InnerMatcher.matches(*Lhs, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // internal::getRHS may yield null for nodes without a right operand.
  if (const Expr *Rhs = internal::getRHS(Node))
    return InnerMatcher.matches(*Rhs, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implemented by composition: build a matcher for this node type and
  // delegate to anyOf(hasLHS(...), hasRHS(...)).
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  // Succeeds when Matcher1 and Matcher2 match the two operands in either
  // order (Matcher1 on LHS with Matcher2 on RHS, or vice versa).
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null operand before running the inner matcher.
  if (const Expr *Operand = internal::getSubExpr(Node))
    return InnerMatcher.matches(*Operand, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher picks the right accessor per node type.
  const Expr *SourceExpr =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (SourceExpr == nullptr)
    return false;
  return InnerMatcher.matches(*SourceExpr, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Straight comparison against the node's recorded cast kind.
  const CastKind NodeKind = Node.getCastKind();
  return NodeKind == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type exactly as written in the cast expression.
  const QualType WrittenType = Node.getTypeAsWritten();
  return InnerMatcher.matches(WrittenType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The implicit cast's own type is its destination type.
  const QualType DestType = Node.getType();
  return InnerMatcher.matches(DestType, Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  // Forward to the tag declaration's keyword classification.
  const bool SpelledStruct = Node.isStruct();
  return SpelledStruct;
}
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  // Forward to the tag declaration's keyword classification.
  const bool SpelledUnion = Node.isUnion();
  return SpelledUnion;
}
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  // Forward to the tag declaration's keyword classification.
  const bool SpelledClass = Node.isClass();
  return SpelledClass;
}
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  // Forward to the tag declaration's keyword classification.
  const bool SpelledEnum = Node.isEnum();
  return SpelledEnum;
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // For the binary form (x ?: y) this is the condition's opaque value.
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null false-branch before running the inner matcher.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
// All four supported node types expose isThisDeclarationADefinition(),
// which distinguishes a definition from a mere (re)declaration.
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
// C-style variadic ("...") only; template parameter packs do not count.
return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // NOTE(review): the scope guard presumably lets the inner matcher visit
  // AST children not spelled in the source while matching the class —
  // confirm against ASTMatchFinder's documentation.
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);
  const CXXRecordDecl *Class = Node.getParent();
  if (Class == nullptr)
    return false;
  return InnerMatcher.matches(*Class, Finder, Builder);
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
// Accumulate one bound-nodes entry per directly overridden method that
// matches, so a single node can yield multiple match results.
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
// Match against a copy of the current bindings so a failed attempt does
// not pollute the builder shared across loop iterations.
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
// Replace the caller's bindings with the accumulated per-match results
// (empty when nothing matched).
*Builder = std::move(Result);
return Matched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
CXXBaseSpecifier)) {
// Both supported node types expose isVirtual(): declared-or-inherited
// virtual for methods, virtual inheritance for base specifiers.
return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
// Requires the 'virtual' keyword in the source, not just inherited virtuality.
return Node.isVirtualAsWritten();
}
/// Matches constructor declarations for which
/// \c CXXConstructorDecl::isInheritingConstructor() returns true, i.e.
/// constructors brought in from a base class via a using-declaration.
AST_MATCHER(CXXConstructorDecl, isInheritingConstructor) {
return Node.isInheritingConstructor();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
// 'final' is modeled as FinalAttr on both classes and methods. 'template'
// is required to disambiguate the dependent hasAttr call here.
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
// Pure virtual, i.e. declared with "= 0".
return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
// Checks the const qualifier on the method itself (its 'this' type).
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
// Delegates to the AST's own copy-assignment classification.
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
// Delegates to the AST's own move-assignment classification.
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // A method counts as an override when it is marked with the 'override'
  // keyword (OverrideAttr) or actually overrides at least one base method.
  if (Node.hasAttr<OverrideAttr>())
    return true;
  return Node.size_overridden_methods() > 0;
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
// Excludes implicitly generated, defaulted, and deleted methods.
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
// All three member-expression node kinds expose isArrow().
return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
// operator-> reaches through QualType to the underlying Type.
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
// operator-> reaches through QualType to the underlying Type.
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
// operator-> reaches through QualType to the underlying Type.
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
// Covers char, wchar_t and the other character types via the Type query.
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
// Includes Objective-C object pointers, unlike a plain pointer-type check.
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
// Top-level const only; queried on the QualType itself, not the pointee.
return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
// Top-level volatile only; queried on the QualType itself, not the pointee.
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
// Local qualifiers only — qualifiers hidden behind a typedef do not count.
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // The referenced member declaration is always present on a MemberExpr.
  const ValueDecl *MemberDecl = Node.getMemberDecl();
  return InnerMatcher.matches(*MemberDecl, Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
// For dependent member expressions, an implicit access has no written base
// expression, so there is nothing for the inner matcher to examine.
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
// Succeeds when some shadow declaration in [shadow_begin, shadow_end)
// matches; the helper yields the end iterator when none does.
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder,
Builder) != Node.shadow_end();
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // The declaration a shadow declaration refers to is always present.
  const NamedDecl *Target = Node.getTargetDecl();
  return InnerMatcher.matches(*Target, Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once and accept the three instantiation
  // kinds; explicit specializations and plain declarations do not match.
  switch (Node.getTemplateSpecializationKind()) {
  case TSK_ImplicitInstantiation:
  case TSK_ExplicitInstantiationDefinition:
  case TSK_ExplicitInstantiationDeclaration:
    return true;
  default:
    return false;
  }
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it is itself an instantiation of a class or
  // function template, or is nested anywhere inside such an instantiation.
  auto InstantiationDecl =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return decl(anyOf(InstantiationDecl, hasAncestor(InstantiationDecl)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement is "in" an instantiation when one of its ancestor
  // declarations is a class or function template instantiation.
  auto InstantiationDecl =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(InstantiationDecl));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
// Only explicit specializations, never implicit or explicit instantiations.
return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
// Adapts a QualType matcher so it can be applied to TypeLoc nodes.
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
// Direct query on the Type node (this matcher takes Type, not QualType).
return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
// Direct query on the Type node (this matcher takes Type, not QualType).
return Node.isVoidType();
}
/// Shorthand for a variadic matcher that dyn_casts a \c Type to \c NodeType;
/// used below to declare the builtin/array/pointer/... type matchers.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
// "Real" excludes _Complex floating types.
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
// Dispatch through a per-node-type trait, since constant arrays and string
// literals expose their size via different APIs.
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // A VariableArrayType always carries its size expression.
  const Expr *SizeExpr = Node.getSizeExpr();
  return InnerMatcher.matches(*SizeExpr, Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Elaborated types without an explicit qualifier never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (Qualifier == nullptr)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
// Match against the underlying type the elaborated type refers to.
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Run the inner QualType matcher on the decayed (pointer) form of the type.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  // Some declarations (e.g. the translation unit itself) have no context.
  if (!DC) return false;
  // Reinterpret the DeclContext as a Decl so Decl matchers can run on it.
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher so it can run on the *Loc node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only nested name specifiers that refer to a type can match.
  const Type *SpecifiedType = Node.getAsType();
  if (SpecifiedType == nullptr)
    return false;
  // Wrap the bare Type (without qualifiers) so the QualType matcher applies.
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against a null loc, and against specifiers that do not refer to a
  // type (e.g. plain namespace qualifiers), before matching the TypeLoc.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // Run the inner matcher on the prefix, if this specifier has one.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (null) prefix loc means there is nothing to match against.
  NestedNameSpecifierLoc PrefixLoc = Node.getPrefix();
  if (!PrefixLoc)
    return false;
  return InnerMatcher.matches(PrefixLoc, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only specifiers that name a namespace can match.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace != nullptr &&
         InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Matches attributes.
/// Attributes may be attached with a variety of different syntaxes (including
/// keywords, C++11 attributes, GNU ``__attribute__`` and MSVC ``__declspec``,
/// and ``#pragma``s). They may also be implicit.
///
/// Given
/// \code
/// struct [[nodiscard]] Foo{};
/// void bar(int * __attribute__((nonnull)) );
/// __declspec(noinline) void baz();
///
/// #pragma omp declare simd
/// int min();
/// \endcode
/// attr()
/// matches "nodiscard", "nonnull", "noinline", and the whole "#pragma" line.
extern const internal::VariadicAllOfMatcher<Attr> attr;
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Decl nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Stmt nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Type nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  // Walk the intrusive list of case/default labels hanging off the switch.
  for (const SwitchCase *Label = Node.getSwitchCaseList(); Label != nullptr;
       Label = Label->getNextSwitchCase()) {
    // Each label gets its own copy of the bindings accumulated so far.
    BoundNodesTreeBuilder LabelBuilder(*Builder);
    if (InnerMatcher.matches(*Label, Finder, &LabelBuilder)) {
      AnyMatched = true;
      Matches.addMatch(LabelBuilder);
    }
  }
  // Publish the combined per-label bindings as this matcher's result.
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto *Init : Node.inits()) {
    // In implicit-node-ignoring traversal modes, skip initializers the user
    // did not spell out in the source.
    if (Finder->isTraversalIgnoringImplicitNodes() && !Init->isWritten())
      continue;
    // Each initializer gets its own copy of the bindings collected so far.
    BoundNodesTreeBuilder InitBuilder(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &InitBuilder)) {
      AnyMatched = true;
      Matches.addMatch(InitBuilder);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Delegates to the AST node's own classification.
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Delegates to the AST node's own classification.
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Delegates to the AST node's own classification.
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Delegates to the AST node's own classification.
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                                        CXXConstructorDecl, CXXConversionDecl,
                                        CXXDeductionGuideDecl)) {
  // Delegates to the node's resolved explicit specifier.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  // Only the explicit(expr) form carries an expression; a plain `explicit`
  // (or no specifier at all) yields a null expression and never matches.
  if (!ES.getExpr())
    return false;
  // NOTE(review): this scope presumably relaxes the "children not spelled in
  // source" restriction while matching the specifier's expression — confirm
  // against ASTChildrenNotSpelledInSourceScope's definition.
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);
  return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  // The two supported types are disjoint, so the dispatch order is irrelevant.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Delegates to the AST node's own classification.
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
// Delegates to Decl::isInStdNamespace().
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means this is a GNU "case lo ... hi:" range, which has no
  // single constant to match; otherwise match the LHS constant.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Match if any attribute attached to the declaration has the requested kind.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has no value expression and therefore never matches.
  const auto *RetExpr = Node.getRetValue();
  return RetExpr != nullptr && InnerMatcher.matches(*RetExpr, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  // Covers GNU __null, C++11 nullptr, and a literal 0 in pointer context.
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}
/// Matches the DecompositionDecl the binding belongs to.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// bindingDecl(hasName("f"),
/// forDecomposition(decompositionDecl())
/// \endcode
/// matches 'f' in 'auto &[f, s, t]'.
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>,
              InnerMatcher) {
  // The decomposed declaration may be null; then there is nothing to match.
  const ValueDecl *Decomposed = Node.getDecomposedDecl();
  return Decomposed != nullptr &&
         InnerMatcher.matches(*Decomposed, Finder, Builder);
}
/// Matches the Nth binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasBinding(0,
/// bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N,
               internal::Matcher<BindingDecl>, InnerMatcher) {
  // Reject out-of-range indices before dereferencing the binding list.
  auto Bindings = Node.bindings();
  return N < Bindings.size() &&
         InnerMatcher.matches(*Bindings[N], Finder, Builder);
}
/// Matches any binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>,
              InnerMatcher) {
  // Succeed as soon as one binding satisfies the inner matcher.
  for (const auto *Binding : Node.bindings()) {
    if (InnerMatcher.matches(*Binding, Finder, Builder))
      return true;
  }
  return false;
}
/// Matches declaration of the function the statement belongs to.
///
/// Deprecated. Use forCallable() to correctly handle the situation when
/// the declaration is not a function (but a block or an Objective-C method).
/// forFunction() not only fails to take non-functions into account but also
/// may match the wrong declaration in their presence.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Depth-first walk up the parent chain(s); a statement can have more than
  // one parent (e.g. in template instantiations).
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda is matched through its implicit call operator.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else {
      // Not a function-like node: keep climbing through this node's parents.
      // Note: once a FunctionDecl/LambdaExpr is reached on a path, traversal
      // stops on that path even if the inner matcher failed.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches declaration of the function, method, or block the statement
/// belongs to.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forCallable(functionDecl(hasName("operator="))))
/// matches 'return *this'
/// but does not match 'return v > 0'
///
/// Given:
/// \code
/// -(void) foo {
/// int x = 1;
/// dispatch_sync(queue, ^{ int y = 2; });
/// }
/// \endcode
/// declStmt(forCallable(objcMethodDecl()))
/// matches 'int x = 1'
/// but does not match 'int y = 2'.
/// whereas declStmt(forCallable(blockDecl()))
/// matches 'int y = 2'
/// but does not match 'int x = 1'.
AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
  // Depth-first walk up the parent chain(s) looking for the enclosing
  // callable: a function, lambda call operator, Objective-C method, or block.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda is matched through its implicit call operator.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) {
      if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) {
      if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) {
        return true;
      }
    } else {
      // Not a callable: keep climbing through this node's parents. Traversal
      // along a path stops at the first callable found, matching or not.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Delegates to the AST node's own linkage computation.
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Delegates to the AST node's own default-argument query.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Delegates to the AST node's own classification.
  return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeed if any placement-new argument satisfies the inner matcher.
  for (const Expr *PlacementArg : Node.placement_arguments()) {
    if (InnerMatcher.matches(*PlacementArg, Finder, Builder))
      return true;
  }
  return false;
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // The middle term checks that the size expression inside the optional is
  // non-null before it is dereferenced for matching.
  // NOTE(review): assumes getArraySize() holds a value whenever isArray() is
  // true — confirm against CXXNewExpr::getArraySize().
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Delegates to the AST node's own definition query.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Delegates to the AST node's own classification.
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only functions with a prototype carry trailing-return-type information.
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      // Skip the elidable constructor call and the MaterializeTemporaryExpr
      // it wraps, matching directly against the materialized sub-expression.
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Not an elidable constructor call: match against the original node.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Delegates to the AST node's own classification.
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  // getStructuredBlock() is only queried for non-standalone directives.
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  // matchesFirstInPointerRange yields the first matching clause's iterator,
  // or the end iterator when nothing matched.
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder,
                                    Builder) != Clauses.end();
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compares the clause's kind against the OpenMP "none" default kind.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compares the clause's kind against the OpenMP "shared" default kind.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  // Compares the clause's kind against the OpenMP "firstprivate" default kind.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Delegates to llvm::omp::isAllowedClauseForDirective; the answer depends
  // on the OpenMP version taken from the language options.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
memspeed.c | /*
cc memspeed.c cpuidc.c -lm -lrt -O3 -o memspeedIL
gcc memspeed.c cpuidc.c -lm -lrt -O3 -march=armv6 -mfloat-abi=hard -mfpu=vfp -o memspeedPiA6
#define version "32 Bit Version 4"
Compile time Raspberry Pi - 9 seconds+
gcc memspeed.c cpuidc.c -lm -lrt -O3 -mcpu=cortex-a7 -mfloat-abi=hard -mfpu=neon-vfpv4 -o memspeedPiA7
#define version "vfpv4 32 Bit Version 1"
gcc memspeed.c cpuidc.c -lm -lrt -O3 -mcpu=cortex-a7 -mfloat-abi=hard -mfpu=neon-vfpv4 -funsafe-math-optimizations -o memSpdPiNEON
#define version "NEON 32 Bit Version 1"
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cpuidh.h"
#include <math.h>
#if defined(__MACH__)
#include <stdlib.h>
#else
#include <malloc.h>
#endif
// #define Integer64Bits
#define Integer32Bits
// #define version "64 Bit Version 4"
// #define version "32 Bit Version 4"
#ifndef options
#define options "vfpv4 32 Bit Version 1"
#endif
double runSecs = 0.1; /* minimum seconds per timed pass; see checkTime() */
int n1;               /* repetition count, scaled upward by checkTime() */
double *xd;           /* double-precision work arrays */
double *yd;
float *xs;            /* single-precision work arrays */
float *ys;
#ifdef Integer64Bits
long long * xi;       /* integer work arrays, 64-bit build */
long long * yi;
#else
int *xi;              /* integer work arrays, 32-bit build */
int *yi;
#endif
void checkTime() {
  /* Calibration step: grow the iteration count n1 until a timed pass lasts
     at least runSecs seconds. `secs` holds the duration of the last pass. */
  if (secs >= runSecs)
    return;
  if (secs < runSecs / 8.0) {
    /* Far too fast: scale up aggressively. */
    n1 = n1 * 10;
  } else {
    /* Close: scale proportionally, with 25% headroom, rounding up by one. */
    n1 = (int) (runSecs * 1.25 / secs * (double) n1 + 1);
  }
}
/*
 * Memory speed benchmark: times nine inner-loop kernels (double/float/int
 * read-modify-write, add, and copy) over working sets that double in size
 * each pass, printing MB/s for each.  Optional argv[1] selects the run
 * count (log2 of memory used); optional argv[2] gives a size in MB that
 * overrides it.  Timing helpers (start_time/end_time/local_time, secs,
 * timeday) come from cpuidh.h.
 */
int main(int argc, char *argv[]) {
    int passes[25];
    int allocMem[25];
    double ramKB = 0.0;  /* RAM autodetection not implemented; stays 0 */
    int useMem;
    float sums;
    float ones = 1.0;
    float zeros = 0;
    int i, m, j, nn;     /* removed unused local `g` */
    int kd, ks, ki, mem;
#ifdef Integer64Bits
    long long sumi;
#else
    int sumi;
#endif
    int zeroi = 0;
    int onei = 1;
    int inc;
    double sumd, mbpsd, mbpss, mbpsi;
    double oned = 1.0;
    double zerod = 0;
    double memMB;
    int runs = 12; // 8 MB;
    int param = 0;  /* was uninitialized: reading it after a failed sscanf was UB */
    /* First argument: runs = log2-ish index into allocMem[]/passes[]. */
    if (argc >= 2) {
        sscanf(argv[1], "%d", &runs);
        /* was (runs == 0 || runs > 23): negative values slipped through to
           allocMem[runs - 1] below, an out-of-bounds read */
        if (runs < 1 || runs > 23) {
            printf("Unsupported value for runs, it must be integer, from 1 to 22\n");
            printf("Runs determine amount of memory used for benchmark, as power of 2\n");
            printf(" Default: 12 = 8 MB\n");
            exit(1);
        }
    }
    /* Second argument: memory size in MB; overrides runs for large sizes. */
    if (argc >= 3) {
        sscanf(argv[2], "%d", &param);  /* fixed: source had mojibake "¶m" for "&param" */
        if (param > 0) {
            if (param > 120) runs = 16;
            if (param > 250) runs = 17;
            if (param > 500) runs = 18;
            if (param > 1000) runs = 19;
        }
    }
    printf(" ##########################################\n");
    // ramKB = ramGB * 1000000;
    allocMem[0] = 2; // KB two arrays
    for (i = 1; i < 23; i++) {
        allocMem[i] = allocMem[i - 1] * 2;
    }
    /* passes[j] = number of doubles processed per inner pass at step j */
    passes[0] = 250; // 4 KB
    passes[1] = 500; // 8 KB
    passes[2] = 1000; // 16 KB
    passes[3] = 2000; // 32 KB
    passes[4] = 4000; // 64 KB
    passes[5] = 8000; // 128 KB
    passes[6] = 16000; // 256 KB
    passes[7] = 32000; // 512 KB
    passes[8] = 64000; // 1 MB
    passes[9] = 128000; // 2 MB
    passes[10] = 256000; // 4 MB
    passes[11] = 512000; // 8 MB
    passes[12] = 1024000; // 16 MB
    passes[13] = 2048000; // 32 MB
    passes[14] = 4096000; // 64 MB
    passes[15] = 8192000; // 128 MB
    passes[16] = 16384000; // 256 MB
    passes[17] = 32768000; // 512 MB
    passes[18] = 65536000; // 1024 MB
    passes[19] = 131072000; // 2048 MB
    passes[20] = 262144000; // 4096 MB
    passes[21] = 524288000; // 8192 MB
    passes[22] = 1048576000; //16384 MB
    /* ramKB is always 0 here (autodetect disabled), so these are inert */
    if (ramKB > 14000) runs = 11;
    if (ramKB > 30000) runs = 12;
    if (ramKB > 60000) runs = 13;
    if (ramKB > 120000) runs = 14;
    if (ramKB > 250000) runs = 15;
    if (ramKB > 500000) runs = 16;
    if (ramKB > 1000000) runs = 17;
    if (ramKB > 1500000) runs = 18;
    if (ramKB > 3500000) runs = 19;
    if (ramKB > 7500000) runs = 20;
    if (ramKB > 15500000) runs = 21;
    if (ramKB > 31500000) runs = 22;
    if (ramKB > 63500000) runs = 23;
    useMem = allocMem[runs - 1];
    xd = (double *) malloc(useMem * 1024);
    if (xd == NULL) {
        printf(" ERROR WILL EXIT\n");
        exit(1);
    }
    yd = (double *) malloc(useMem * 1024);
    if (yd == NULL) {
        printf(" ERROR WILL EXIT\n");
        free(xd);
        exit(1);
    }
    /* NOTE(review): the float/int views alias the double buffers; this
       breaks strict aliasing, but reusing one buffer for all element
       types is the benchmark's design. */
    float *xs = (float *) xd;
    float *ys = (float *) yd;
#ifdef Integer64Bits
    long long *xi = (long long *) xd;
    long long *yi = (long long *) yd;
#else
    int *xi = (int *) xd;
    int *yi = (int *) yd;
#endif
    local_time();
    printf("\n Memory Reading Speed Test %s %s ", options, timeday);
    printf(" Copyright (C) 2013, Roy Longbottom\n\n");
    printf(" Memory x[m]=x[m]+s*y[m] Int x[m]=x[m]+y[m] x[m]=y[m]\n");
#ifdef Integer64Bits
    printf(" KBytes Dble Sngl Int64 Dble Sngl Int64 Dble Sngl Int64\n");
    fprintf(outfile, " KBytes Dble Sngl Int64 Dble Sngl Int64 Dble Sngl Int64\n");
#else
    printf(" KBytes Dble Sngl Int32 Dble Sngl Int32 Dble Sngl Int32\n");
#endif
    printf(" Used MB/S MB/S MB/S MB/S MB/S MB/S MB/S MB/S MB/S\n\n");
    for (j = 1; j < runs; j++) {
        kd = passes[j];
        nn = 6400000 / kd;  /* initial repeat count, scaled by checkTime() */
        if (nn < 1) nn = 1;
        ks = kd * 2;        /* twice as many floats fit in the same bytes */
#ifdef Integer64Bits
        ki = kd;
#else
        ki = kd * 2;
#endif
        memMB = (double) kd * 16.0 / 1000000;  /* bytes moved per pass (read x,y + write x) */
        mem = (int) ((double) kd * 16.0 / 1000);
        inc = 4;            /* all inner loops are unrolled by 4 */
        /* --- double x[m] += s*y[m] --- */
        n1 = nn;
        do {
            sumd = 1.00001;
            for (m = 0; m < kd; m++) {
                xd[m] = oned;
                yd[m] = oned;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < kd; m = m + inc) {
                    xd[m] = xd[m] + sumd * yd[m];
                    xd[m + 1] = xd[m + 1] + sumd * yd[m + 1];
                    xd[m + 2] = xd[m + 2] + sumd * yd[m + 2];
                    xd[m + 3] = xd[m + 3] + sumd * yd[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        mbpsd = (double) n1 * memMB / secs;
        printf("%8d %7d", mem, (int) mbpsd);
        /* --- float x[m] += s*y[m] --- */
        n1 = nn;
        do {
            sums = 1.0001;
            for (m = 0; m < ks; m++) {
                xs[m] = ones;
                ys[m] = ones;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ks; m = m + inc) {
                    xs[m] = xs[m] + sums * ys[m];
                    xs[m + 1] = xs[m + 1] + sums * ys[m + 1];
                    xs[m + 2] = xs[m + 2] + sums * ys[m + 2];
                    xs[m + 3] = xs[m + 3] + sums * ys[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        mbpss = (double) n1 * memMB / secs;
        printf("%7d", (int) mbpss);
        /* --- int x[m] += sumi + y[m] (sumi alternates sign per pass) --- */
        n1 = nn;
        do {
            sumi = nn;
            for (m = 0; m < ki; m++) {
                xi[m] = zeroi;
                yi[m] = zeroi;
            }
            yi[ki - 1] = onei;
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ki; m = m + inc) {
                    xi[m] = xi[m] + sumi + yi[m];
                    xi[m + 1] = xi[m + 1] + sumi + yi[m + 1];
                    xi[m + 2] = xi[m + 2] + sumi + yi[m + 2];
                    xi[m + 3] = xi[m + 3] + sumi + yi[m + 3];
                }
                sumi = -sumi;  /* keep values bounded over many passes */
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        mbpsi = (double) n1 * memMB / secs;
        //printf("%8d %7d%7d%7d", mem, (int) mbpsd, (int) mbpss, (int) mbpsi);
        printf("%7d", (int) mbpsi);
        /* --- double x[m] += y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < kd; m++) {
                xd[m] = zerod;
                yd[m] = oned;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < kd; m = m + inc) {
                    xd[m] = xd[m] + yd[m];
                    xd[m + 1] = xd[m + 1] + yd[m + 1];
                    xd[m + 2] = xd[m + 2] + yd[m + 2];
                    xd[m + 3] = xd[m + 3] + yd[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sumd = xd[1];  /* consume a result so the loop is not optimized away */
        mbpsd = (double) n1 * memMB / secs;
        printf("%7d", (int) mbpsd);
        /* --- float x[m] += y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < ks; m++) {
                xs[m] = zeros;
                ys[m] = ones;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ks; m = m + inc) {
                    xs[m] = xs[m] + ys[m];
                    xs[m + 1] = xs[m + 1] + ys[m + 1];
                    xs[m + 2] = xs[m + 2] + ys[m + 2];
                    xs[m + 3] = xs[m + 3] + ys[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sums = xs[1];
        mbpss = (double) n1 * memMB / secs;
        printf("%7d", (int) mbpss);
        /* --- int x[m] += y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < ki; m++) {
                xi[m] = zeroi;
                yi[m] = onei;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ki; m = m + inc) {
                    xi[m] = xi[m] + yi[m];
                    xi[m + 1] = xi[m + 1] + yi[m + 1];
                    xi[m + 2] = xi[m + 2] + yi[m + 2];
                    xi[m + 3] = xi[m + 3] + yi[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sumi = xi[1];
        mbpsi = (double) n1 * memMB / secs;
        //printf("%7d%7d%7d", (int) mbpsd, (int) mbpss, (int) mbpsi);
        printf("%7d", (int) mbpsi);
        /* copy tests: one read + one write per element -> 8 bytes/double */
        memMB = (double) kd * 8 / 1000000;
        /* --- double x[m] = y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < kd + inc; m++) {
                xd[m] = zerod;
                yd[m] = m;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < kd; m = m + inc) {
                    xd[m] = yd[m];
                    xd[m + 1] = yd[m + 1];
                    xd[m + 2] = yd[m + 2];
                    xd[m + 3] = yd[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sumd = xd[kd - 1] + 1;
        mbpsd = (double) n1 * memMB / secs;
        printf("%7d", (int) mbpsd);
        /* --- float x[m] = y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < ks + inc; m++) {
                xs[m] = zeros;
                ys[m] = m;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ks; m = m + inc) {
                    xs[m] = ys[m];
                    xs[m + 1] = ys[m + 1];
                    xs[m + 2] = ys[m + 2];
                    xs[m + 3] = ys[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sums = xs[ks - 1] + 1;
        mbpss = (double) n1 * memMB / secs;
        printf("%7d", (int) mbpss);
        /* --- int x[m] = y[m] --- */
        n1 = nn;
        do {
            for (m = 0; m < ki + inc; m++) {
                xi[m] = zeroi;
                yi[m] = m;
            }
            start_time();
            for (i = 0; i < n1; i++) {
                // #pragma omp parallel for
                for (m = 0; m < ki; m = m + inc) {
                    xi[m] = yi[m];
                    xi[m + 1] = yi[m + 1];
                    xi[m + 2] = yi[m + 2];
                    xi[m + 3] = yi[m + 3];
                }
            }
            end_time();
            checkTime();
        } while (secs < runSecs);
        sumi = xi[ki - 1] + 1;
        mbpsi = (double) n1 * memMB / secs;
        printf("%7d\n", (int) mbpsi);
    }
    local_time();
    printf("\n End of test %s\n", timeday);
    free(yd);
    free(xd);
    /* removed unused `char moredata[1024]` buffer */
    return 0;
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"
#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
 public:
  // Update the training hyper-parameters from key/value args,
  // allowing unknown keys.
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
  }

  // Restore train_param from a JSON config object.
  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    fromJson(config.at("train_param"), &this->param_);
  }
  // Serialize train_param into a JSON config object.
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = toJson(param_);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      // fminmax_ holds, per feature fid, the pair
      //   fminmax_[2*fid]   = max(-fvalue)  (negated minimum)
      //   fminmax_[2*fid+1] = max(fvalue)   (maximum)
      // so both slots can be combined by a plain max-allreduce in SyncInfo().
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            // columns are sorted: c[0] is the column min, c[size-1] the max
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information across workers (element-wise max) */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];      // negated minimum
      bst_float b = fminmax_[fid * 2 + 1];  // maximum
      // sentinel untouched: feature never observed
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        // min == max: only one distinct value seen
        return 1;
      } else {
        return 2;
      }
    }
    bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid *2 + 1];
    }
    // Sample a fraction p of the non-empty features into *p_findex.
    // Rank 0 draws the sample and broadcasts it so all workers agree.
    void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
      std::vector<bst_feature_t> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    // per-feature (negated min, max) pairs; see InitByCol
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    // linear scan of the (sparse) row for the split feature
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    // feature missing in this row: follow the default direction
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    {
      // setup position
      position_.resize(gpair.size());
      std::fill(position_.begin(), position_.end(), 0);
      // mark delete for the deleted datas
      // (~x flips to a negative value; see Decode/SetEncodePosition)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query: start from the root node (nid 0)
      qexpand_.reserve(256); qexpand_.clear();
      qexpand_.push_back(0);
      this->UpdateNode2WorkIndex(tree);
    }
    this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
  }
  /*! \brief update queue expand add in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  // (negative encodings mean "not actively expanding"; decode via ~)
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the encoded position value for ridx,
  // preserving the active/inactive flag carried by the sign
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  reset the positions to the lastest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes,
                               DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat,
                                const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark then to ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      // binary search: is fid one of the split features?
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();
          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    // sort + unique so the result is a deduplicated, ordered feature set
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    // per-thread accumulators avoid contention in the parallel loop below
#pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    }
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
#pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      // negative nid = row no longer active; skip it
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair[ridx]);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s = TStats();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief current size of sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      // next_goal == -1 is the "no value pushed yet" sentinel (see Push)
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        // first value ever: just record it
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        // value changed: decide whether the previous value becomes a summary entry
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            // sketch is full: set an unreachable goal so nothing more is pushed
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", naxt_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        // same value as before: accumulate its weight
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to is working index offset in qexpand,
   *   can be -1, which means the node is node actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *   can be negative, which means this position is no longer expanding
   *   see also Decode/EncodePosition
   */
  std::vector<int> position_;
  FeatureInteractionConstraintHost interaction_constraints_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
lis_matrix_msr.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/************************************************
* function | SOM |
*-----------------------------+-----+
* lis_matrix_set | o |
* lis_matrix_setDLU | o |
* lis_matrix_malloc | o |
* lis_matrix_elements_copy | o |
* lis_matrix_transpose | o |
* lis_matrix_split | o |
* lis_matrix_merge | o |
*-----------------------------+-----+-----+
* function |merge|split|
*-----------------------------+-----+-----|
* lis_matrix_convert | o | |
* lis_matrix_copy | o | o |
* lis_matrix_get_diagonal | o | o |
* lis_matrix_scaling | o | o |
* lis_matrix_scaling_symm | o | o |
* lis_matrix_normf | o | o |
* lis_matrix_sort | o | o |
* lis_matrix_solve | xxx | o |
* lis_matrix_solvet | xxx | o |
************************************************/
#undef __FUNC__
#define __FUNC__ "lis_matrix_set_msr"
/*
 * Attach caller-owned MSR index/value arrays to matrix A without copying.
 * Marks the matrix as holding unassembled MSR data (negative status).
 */
LIS_INT lis_matrix_set_msr(LIS_INT nnz, LIS_INT ndz, LIS_INT *index, LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

	/* An already-assembled matrix is left untouched. */
	if( lis_matrix_is_assembled(A) ) return LIS_SUCCESS;

	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;

	A->index   = index;
	A->value   = value;
	A->is_copy = LIS_FALSE;
	A->status  = -LIS_MATRIX_MSR;
	A->nnz     = nnz;
	A->ndz     = ndz;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_setDLU_msr"
/*
 * Attach pre-split MSR data to matrix A: strictly-lower part (lindex/lvalue),
 * strictly-upper part (uindex/uvalue) and the diagonal (diag).  Ownership of
 * all six arrays transfers to A (no copies are made); the diag pointer is
 * moved into a freshly-created diagonal block D.
 */
LIS_INT lis_matrix_setDLU_msr(LIS_INT lnnz, LIS_INT unnz, LIS_INT lndz, LIS_INT undz, LIS_SCALAR *diag, LIS_INT *lindex, LIS_SCALAR *lvalue,
			      LIS_INT *uindex, LIS_SCALAR *uvalue, LIS_MATRIX A)
{
	LIS_INT err;
	LIS_MATRIX_DIAG D;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	/* nothing to do if already assembled; otherwise validate for SET */
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	/* allocate the L and U core blocks; clean up on any failure */
	A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_msr::A->L");
	if( A->L==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		return LIS_OUT_OF_MEMORY;
	}
	A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_msr::A->U");
	if( A->U==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		lis_matrix_DLU_destroy(A);
		return LIS_OUT_OF_MEMORY;
	}
	err = lis_matrix_diag_create(A->n,0,A->comm,&D);
	if( err )
	{
		lis_matrix_DLU_destroy(A);
		return err;
	}

	/* replace D's own buffer with the caller's diagonal (takes ownership) */
	lis_free(D->value);
	D->value = diag;

	A->D = D;
	A->L->nnz = lnnz;
	A->L->ndz = lndz;
	A->L->index = lindex;
	A->L->value = lvalue;
	A->U->nnz = unnz;
	A->U->ndz = undz;
	A->U->index = uindex;
	A->U->value = uvalue;
	A->is_copy = LIS_FALSE;
	A->status = -LIS_MATRIX_MSR;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_msr"
/*
 * Allocate the index and value arrays for an MSR matrix with nnz
 * off-diagonal entries and ndz diagonal slots (nnz+ndz+1 elements each,
 * per the MSR layout).  On failure both outputs are freed and NULL.
 * The parameter n is unused but kept for interface symmetry with the
 * other storage formats.
 */
LIS_INT lis_matrix_malloc_msr(LIS_INT n, LIS_INT nnz, LIS_INT ndz, LIS_INT **index, LIS_SCALAR **value)
{
	LIS_INT len;

	LIS_DEBUG_FUNC_IN;

	len    = nnz + ndz + 1;
	*value = NULL;
	*index = (LIS_INT *)lis_malloc( len*sizeof(LIS_INT),"lis_matrix_malloc_msr::index" );
	if( *index==NULL )
	{
		LIS_SETERR_MEM(len*sizeof(LIS_INT));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*value = (LIS_SCALAR *)lis_malloc( len*sizeof(LIS_SCALAR),"lis_matrix_malloc_msr::value" );
	if( *value==NULL )
	{
		LIS_SETERR_MEM(len*sizeof(LIS_SCALAR));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_msr"
/*
 * Copy an MSR matrix body (index/value) into o_index/o_value.
 * In MSR layout the first n+1 entries are the diagonal values plus the
 * row-pointer table; entries from index[i] to index[i+1]-1 are row i's
 * off-diagonal elements.  Both copy loops are parallelized per row.
 */
LIS_INT lis_matrix_elements_copy_msr(LIS_INT n, LIS_INT *index, LIS_SCALAR *value,
				     LIS_INT *o_index, LIS_SCALAR *o_value)
{
	LIS_INT i,j;

	LIS_DEBUG_FUNC_IN;

#ifdef _OPENMP
#pragma omp parallel private(i,j)
#endif
	{
		/* header section: diagonal values and row pointers (n+1 slots) */
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<n+1;i++)
		{
			o_index[i] = index[i];
			o_value[i] = value[i];
		}
		/* off-diagonal section, copied row by row */
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<n;i++)
		{
			for(j=index[i];j<index[i+1];j++)
			{
				o_value[j] = value[j];
				o_index[j] = index[j];
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_copy_msr"
/*
 * Deep-copy MSR matrix Ain into Aout.  If Ain is split into D/L/U the
 * split representation is copied; if it is unsplit (or split with the
 * original saved) the merged representation is copied too.  Finishes by
 * assembling Aout.
 */
LIS_INT lis_matrix_copy_msr(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT err;
	LIS_INT i,n,nnz,ndz,lnnz,unnz,lndz,undz;
	LIS_INT *index;
	LIS_INT *lindex;
	LIS_INT *uindex;
	LIS_SCALAR *value,*lvalue,*uvalue,*diag;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	if( Ain->is_splited )
	{
		lnnz = Ain->L->nnz;
		unnz = Ain->U->nnz;
		lndz = Ain->L->ndz;
		undz = Ain->U->ndz;
		lindex = NULL;
		lvalue = NULL;  /* initialized so lis_free2 below is always safe */
		uindex = NULL;
		uvalue = NULL;
		diag   = NULL;
		err = lis_matrix_malloc_msr(n,lnnz,lndz,&lindex,&lvalue);
		if( err )
		{
			return err;
		}
		err = lis_matrix_malloc_msr(n,unnz,undz,&uindex,&uvalue);
		if( err )
		{
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return err;
		}
		diag = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_matrix_copy_msr::diag");
		if( diag==NULL )
		{
			/* BUGFIX: previously returned `err`, which is 0
			   (LIS_SUCCESS) here since the last call succeeded,
			   silently reporting success on allocation failure. */
			LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return LIS_OUT_OF_MEMORY;
		}
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
		for(i=0;i<n;i++)
		{
			diag[i] = Ain->D->value[i];
		}
		lis_matrix_elements_copy_msr(n,Ain->L->index,Ain->L->value,lindex,lvalue);
		lis_matrix_elements_copy_msr(n,Ain->U->index,Ain->U->value,uindex,uvalue);

		/* hand the new arrays to Aout (ownership transfers) */
		err = lis_matrix_setDLU_msr(lnnz,unnz,lndz,undz,diag,lindex,lvalue,uindex,uvalue,Aout);
		if( err )
		{
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return err;
		}
	}
	if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) )
	{
		index = NULL;
		value = NULL;
		nnz   = Ain->nnz;
		ndz   = Ain->ndz;
		err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value);
		if( err )
		{
			return err;
		}
		lis_matrix_elements_copy_msr(n,Ain->index,Ain->value,index,value);
		err = lis_matrix_set_msr(nnz,ndz,index,value,Aout);
		if( err )
		{
			lis_free2(2,index,value);
			return err;
		}
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_msr"
/*
 * Extract the diagonal of MSR matrix A into d[0..n-1].
 * In MSR layout the first n entries of value[] hold the diagonal; after
 * a split the diagonal lives in the separate D block instead.
 */
LIS_INT lis_matrix_get_diagonal_msr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i;
	LIS_INT n;
	LIS_SCALAR *src;

	LIS_DEBUG_FUNC_IN;

	n   = A->n;
	src = A->is_splited ? A->D->value : A->value;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for(i=0; i<n; i++)
	{
		d[i] = src[i];
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_msr"
/*
 * Row-scale MSR matrix A by d[]: each row i is multiplied by d[i] and the
 * diagonal entry is set to 1.
 * NOTE(review): setting the diagonal to exactly 1.0 presumes d[i] is the
 * reciprocal of A's diagonal (as produced by the lis scaling drivers) --
 * verify against the caller.
 */
LIS_INT lis_matrix_scaling_msr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	if( A->is_splited )
	{
		/* split storage: diagonal in D, off-diagonals in L and U */
#ifdef _OPENMP
#pragma omp parallel for private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			A->D->value[i] = 1.0;
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				A->L->value[j] *= d[i];
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				A->U->value[j] *= d[i];
			}
		}
	}
	else
	{
		/* merged MSR: value[i] is row i's diagonal,
		   value[index[i]..index[i+1]-1] its off-diagonals */
#ifdef _OPENMP
#pragma omp parallel for private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			A->value[i] = 1.0;
			for(j=A->index[i];j<A->index[i+1];j++)
			{
				A->value[j] *= d[i];
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_msr"
/*
 * Symmetrically scale MSR matrix A by d[]: entry (i,k) becomes
 * A(i,k)*d[i]*d[k] (column index k taken from the MSR index array),
 * and the diagonal is set to 1.
 * NOTE(review): as with lis_matrix_scaling_msr, the diagonal-to-1 step
 * presumes d[] holds reciprocal square roots of the diagonal -- confirm
 * against the caller.
 */
LIS_INT lis_matrix_scaling_symm_msr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel for private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			A->D->value[i] = 1.0;
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				/* index[j] is the column of this entry */
				A->L->value[j] = A->L->value[j]*d[i]*d[A->L->index[j]];
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				A->U->value[j] = A->U->value[j]*d[i]*d[A->U->index[j]];
			}
		}
	}
	else
	{
#ifdef _OPENMP
#pragma omp parallel for private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			A->value[i] = 1.0;
			for(j=A->index[i];j<A->index[i+1];j++)
			{
				A->value[j] = A->value[j]*d[i]*d[A->index[j]];
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_normf_msr"
/* Compute the Frobenius norm of MSR matrix A into *nrm. */
LIS_INT lis_matrix_normf_msr(LIS_MATRIX A, LIS_SCALAR *nrm)
{
	LIS_INT r,k;
	LIS_INT n;
	LIS_SCALAR sum;

	LIS_DEBUG_FUNC_IN;

	n   = A->n;
	sum = (LIS_SCALAR)0;
	if( A->is_splited )
	{
		/* split storage: diagonal in D, off-diagonals in L and U */
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(r,k)
#endif
		for(r=0; r<n; r++)
		{
			sum += A->D->value[r]*A->D->value[r];
			for(k=A->L->index[r]; k<A->L->index[r+1]; k++)
			{
				sum += A->L->value[k]*A->L->value[k];
			}
			for(k=A->U->index[r]; k<A->U->index[r+1]; k++)
			{
				sum += A->U->value[k]*A->U->value[k];
			}
		}
	}
	else
	{
		/* merged MSR: value[r] is row r's diagonal entry */
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(r,k)
#endif
		for(r=0; r<n; r++)
		{
			sum += A->value[r]*A->value[r];
			for(k=A->index[r]; k<A->index[r+1]; k++)
			{
				sum += A->value[k]*A->value[k];
			}
		}
	}
	*nrm = sqrt(sum);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_transpose_msr"
/* Transpose stub for MSR storage: the actual conversion call is disabled
 * (see the commented-out msr2ccs line); only the type/status fields of
 * *Aout are set.
 * NOTE(review): 'status' is assigned the matrix-type constant
 * LIS_MATRIX_MSR rather than an assembly-status value -- verify against
 * the other lis_matrix_transpose_* routines before relying on it.
 */
LIS_INT lis_matrix_transpose_msr(LIS_MATRIX Ain, LIS_MATRIX *Aout)
{

	LIS_DEBUG_FUNC_IN;

/*	err = lis_matrix_convert_msr2ccs(Ain,Aout);*/
	(*Aout)->matrix_type = LIS_MATRIX_MSR;
	(*Aout)->status = LIS_MATRIX_MSR;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_split_msr"
/* Split an MSR matrix A into a strict lower part L, a strict upper part U
 * (both kept in MSR-style index/value arrays with an n+1 row-pointer
 * prefix) and a diagonal D, enabling the triangular solves above.
 * On success A->L, A->U, A->D are populated and A->is_splited is set.
 * returns LIS_SUCCESS or an error code on allocation failure.
 *
 * NOTE(review): on the error paths after the liw/uiw allocations in the
 * OpenMP build (LU_create / malloc_msr / diag_duplicateM failures),
 * liw and uiw are not freed -- potential leak; confirm and fix upstream.
 */
LIS_INT lis_matrix_split_msr(LIS_MATRIX A)
{
	LIS_INT i,j,n;
	LIS_INT lnnz,unnz;
	LIS_INT lndz,undz;
	LIS_INT err;
	LIS_INT *lindex,*uindex;
	LIS_SCALAR *lvalue,*uvalue;
#ifdef _OPENMP
	LIS_INT kl,ku;
	LIS_INT *liw,*uiw;
#endif
	LIS_MATRIX_DIAG D;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	lnnz = 0;
	unnz = 0;
	lndz = n;
	undz = n;
	D = NULL;
	lindex = NULL;
	lvalue = NULL;
	uindex = NULL;
	uvalue = NULL;

#ifdef _OPENMP
	/* Phase 1 (parallel): count strict-lower/upper entries per row into
	 * liw/uiw, then prefix-sum them into MSR row pointers (offset by the
	 * n+1-slot pointer prefix, hence liw[0] = uiw[0] = n+1). */
	liw = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_matrix_split_msr::liw");
	if( liw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	uiw = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_matrix_split_msr::uiw");
	if( uiw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		lis_free(liw);
		return LIS_OUT_OF_MEMORY;
	}
	#pragma omp parallel for private(i)
	for(i=0;i<n+1;i++)
	{
		liw[i] = 0;
		uiw[i] = 0;
	}
	/* Each iteration touches only liw[i+1]/uiw[i+1], so no race. */
	#pragma omp parallel for private(i,j)
	for(i=0;i<n;i++)
	{
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				liw[i+1]++;
			}
			else if( A->index[j]>i )
			{
				uiw[i+1]++;
			}
		}
	}
	liw[0] = n+1;
	uiw[0] = n+1;
	for(i=0;i<n;i++)
	{
		liw[i+1] += liw[i];
		uiw[i+1] += uiw[i];
	}
	/* liw[n]/uiw[n] already include the n+1 prefix slots. */
	lnnz = liw[n];
	unnz = uiw[n];
#else
	/* Serial count of strict-lower/upper entries (no prefix offset here;
	 * it is applied below when filling). */
	for(i=0;i<n;i++)
	{
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lnnz++;
			}
			else if( A->index[j]>i )
			{
				unnz++;
			}
		}
	}
#endif

	/* Phase 2: allocate L, U and the diagonal container. */
	err = lis_matrix_LU_create(A);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_msr(n,lnnz,lndz,&lindex,&lvalue);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_msr(n,unnz,undz,&uindex,&uvalue);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}
	err = lis_matrix_diag_duplicateM(A,&D);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}

#ifdef _OPENMP
	/* Phase 3 (parallel): copy precomputed row pointers, then scatter
	 * each row's entries into L/U using private cursors kl/ku. */
	#pragma omp parallel for private(i)
	for(i=0;i<n+1;i++)
	{
		lindex[i] = liw[i];
		uindex[i] = uiw[i];
	}
	#pragma omp parallel for private(i,j,kl,ku)
	for(i=0;i<n;i++)
	{
		kl = lindex[i];
		ku = uindex[i];
		D->value[i] = A->value[i];
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lindex[kl] = A->index[j];
				lvalue[kl] = A->value[j];
				kl++;
			}
			else if( A->index[j]>i )
			{
				uindex[ku] = A->index[j];
				uvalue[ku] = A->value[j];
				ku++;
			}
		}
	}
	lis_free2(2,liw,uiw);
#else
	/* Phase 3 (serial): single pass, building row pointers on the fly.
	 * lnnz/unnz are reused as running fill cursors starting after the
	 * n+1-slot pointer prefix. */
	lnnz = n+1;
	unnz = n+1;
	lindex[0] = n+1;
	uindex[0] = n+1;
	for(i=0;i<n;i++)
	{
		D->value[i] = A->value[i];
		for(j=A->index[i];j<A->index[i+1];j++)
		{
			if( A->index[j]<i )
			{
				lindex[lnnz] = A->index[j];
				lvalue[lnnz] = A->value[j];
				lnnz++;
			}
			else if( A->index[j]>i )
			{
				uindex[unnz] = A->index[j];
				uvalue[unnz] = A->value[j];
				unnz++;
			}
		}
		lindex[i+1] = lnnz;
		uindex[i+1] = unnz;
	}
#endif
	/* Publish the split parts; nnz excludes the n+1 pointer prefix. */
	A->L->nnz = lnnz - (n+1);
	A->L->ndz = lndz;
	A->L->index = lindex;
	A->L->value = lvalue;
	A->U->nnz = unnz - (n+1);
	A->U->ndz = undz;
	A->U->index = uindex;
	A->U->value = uvalue;
	A->D = D;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_msr"
/* Rebuild the monolithic MSR arrays of A from its split parts
 * (A->D, A->L, A->U), i.e. the inverse of lis_matrix_split_msr.
 * The merged index/value arrays are stored back into A.
 * returns LIS_SUCCESS or an error code on allocation failure.
 *
 * NOTE(review): 'is' is read from A but never used afterwards --
 * appears vestigial; confirm before removing.
 */
LIS_INT lis_matrix_merge_msr(LIS_MATRIX A)
{
	LIS_INT i,j,n,is;
	LIS_INT nnz,ndz;
	LIS_INT err;
	LIS_INT *index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nnz = 0;
	ndz = 0;
	is = A->is;
	index = NULL;
	value = NULL;
	/* Total off-diagonal entries plus n diagonal slots. */
	nnz = A->L->nnz + A->U->nnz + n;
	err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value);
	if( err )
	{
		return err;
	}

	/* Fill cursor starts after the n+1-slot row-pointer prefix. */
	nnz = n+1;
	index[0] = n+1;
	if( A->matrix_type==LIS_MATRIX_MSR )
	{
		/* Emit lower entries before upper entries for each row. */
		for(i=0;i<n;i++)
		{
			value[i] = A->D->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				index[nnz] = A->L->index[j];
				value[nnz] = A->L->value[j];
				nnz++;
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				index[nnz] = A->U->index[j];
				value[nnz] = A->U->value[j];
				nnz++;
			}
			index[i+1] = nnz;
		}
	}
	else
	{
		/* Other storage origin: upper entries first, then lower. */
		for(i=0;i<n;i++)
		{
			value[i] = A->D->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				index[nnz] = A->U->index[j];
				value[nnz] = A->U->value[j];
				nnz++;
			}
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				index[nnz] = A->L->index[j];
				value[nnz] = A->L->value[j];
				nnz++;
			}
			index[i+1] = nnz;
		}
	}
	A->nnz = nnz;
	A->ndz = ndz;
	A->value = value;
	A->index = index;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_sort_msr"
/* Sort the column indices (and matching values) within each row of an
 * MSR matrix into ascending order; idempotent via the is_sorted flag.
 *
 * FIX: the previous version read row bounds from A->ptr / A->L->ptr /
 * A->U->ptr, but ptr[] is the CRS row-pointer array and is not populated
 * for MSR storage (MSR keeps its row pointers in the first n+1 slots of
 * index[], as every other routine in this file does) -- dereferencing
 * ptr here is invalid for MSR matrices. Row bounds now come from index[].
 */
LIS_INT lis_matrix_sort_msr(LIS_MATRIX A)
{
	LIS_INT i,n;

	LIS_DEBUG_FUNC_IN;

	if( !A->is_sorted )
	{
		n = A->n;
		if( A->is_splited )
		{
			/* L and U are stored in the same MSR layout (see
			 * lis_matrix_split_msr), so their row bounds also live
			 * in index[0..n]. */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->L->index[i],A->L->index[i+1]-1,A->L->index,A->L->value);
				lis_sort_id(A->U->index[i],A->U->index[i+1]-1,A->U->index,A->U->value);
			}
		}
		else
		{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->index[i],A->index[i+1]-1,A->index,A->value);
			}
		}
		A->is_sorted = LIS_TRUE;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_msr"
/* Triangular solve with the split parts of A (requires a prior
 * lis_matrix_split_msr). flag selects:
 *   LIS_MATRIX_LOWER : forward substitution  x = (D+L)^{-1} b
 *   LIS_MATRIX_UPPER : backward substitution x = (D+U)^{-1} b
 *   LIS_MATRIX_SSOR  : forward sweep then a backward correction sweep
 * A->WD->value[i] multiplies the row residual -- presumably the
 * reciprocal working diagonal set up by the preconditioner; TODO confirm.
 */
LIS_INT lis_matrix_solve_msr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	b = B->value;
	x = X->value;
	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		/* Forward sweep: rows in increasing order, subtracting the
		 * already-computed lower entries. */
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				t -= A->L->value[j] * x[A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_UPPER:
		/* Backward sweep: rows in decreasing order. */
		for(i=n-1;i>=0;i--)
		{
			t = b[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				t -= A->U->value[j] * x[A->U->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_SSOR:
		/* Forward sweep as in LOWER... */
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				t -= A->L->value[j] * x[A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		/* ...then a backward correction using the upper part. */
		for(i=n-1;i>=0;i--)
		{
			t = 0.0;
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				/* Skip columns >= n -- presumably off-process/halo
				 * columns; TODO confirm why only this sweep guards. */
				if( A->U->index[j]>=n ) continue;
				t += A->U->value[j] * x[A->U->index[j]];
			}
			x[i] -= t * A->WD->value[i];
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_solvet_msr"
/* Transposed triangular solve with the split parts of A, using the
 * column-sweep (saxpy) formulation: X starts as a copy of B and each
 * finalized x[i] is scattered into the remaining unknowns.
 *
 * NOTE(review): the LOWER case walks rows backward using the U part and
 * the UPPER case walks forward using the L part; for a transposed solve
 * the stored row-wise parts do swap roles, but the direction/part pairing
 * here differs from what a direct derivation suggests -- verify against
 * lis_matrix_solvet_crs before changing anything.
 */
LIS_INT lis_matrix_solvet_msr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	b = B->value;
	x = X->value;
	/* Column sweeps update X in place, so seed it with B. */
	lis_vector_copy(B,X);
	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		for(i=n-1;i>=0;i--)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				x[A->U->index[j]] -= A->U->value[j] * x[i];
			}
		}
		break;
	case LIS_MATRIX_UPPER:
		for(i=0;i<n;i++)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				x[A->L->index[j]] -= A->L->value[j] * x[i];
			}
		}
		break;
	case LIS_MATRIX_SSOR:
		/* Forward sweep: scatter scaled x[i] through U without
		 * overwriting x[i] itself... */
		for(i=0;i<n;i++)
		{
			t = x[i] * A->WD->value[i];
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				x[A->U->index[j]] -= A->U->value[j] * t;
			}
		}
		/* ...then the backward sweep, which also commits x[i] = t. */
		for(i=n-1;i>=0;i--)
		{
			t = x[i] * A->WD->value[i];
			x[i] = t;
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				x[A->L->index[j]] -= A->L->value[j] * t;
			}
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2msr"
/* Convert a CRS matrix Ain into MSR storage in Aout.
 * MSR keeps the diagonal in value[0..n-1], row pointers in index[0..n]
 * (biased by n+1) and off-diagonal entries after slot n.
 * ndz counts rows with no stored diagonal entry in the CRS input.
 * returns LIS_SUCCESS or an error code.
 */
LIS_INT lis_matrix_convert_crs2msr(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,jj;
	LIS_INT err;
	LIS_INT n,nnz,ndz;
	LIS_INT count;
	LIS_INT *iw;
	LIS_INT *index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	nnz = Ain->nnz;

	iw = NULL;
	index = NULL;
	value = NULL;

	iw = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2msr::iw" );
	if( iw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_ERR_OUT_OF_MEMORY;
	}

	/* check ndz */
	/* Pass 1: iw[i+1] temporarily flags whether row i stores a diagonal
	 * entry; 'count' sums those flags, then iw[i+1] is converted to the
	 * number of off-diagonal entries in row i. */
	for(i=0;i<n+1;i++) iw[i] = 0;
	count = 0;
	#ifdef _OPENMP
	#pragma omp parallel private(i,j)
	#endif
	{
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<n;i++)
		{
			iw[i+1] = 0;
			for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
			{
				if( i==Ain->index[j] )
				{
					iw[i+1] = 1;
				}
			}
		}
		#ifdef _OPENMP
		#pragma omp for reduction(+:count)
		#endif
		for(i=0;i<n;i++)
		{
			count += iw[i+1];
		}
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<n;i++)
		{
			iw[i+1] = Ain->ptr[i+1]-Ain->ptr[i]-iw[i+1];
		}
	}
	/* Rows without a stored diagonal. */
	ndz = n - count;

	err = lis_matrix_malloc_msr(n,nnz,ndz,&index,&value);
	if( err )
	{
		lis_free2(3,index,value,iw);
		return err;
	}

	/* convert msr */
	/* Pass 2: prefix-sum iw into MSR row pointers (biased by the n+1
	 * pointer prefix), copy them into index[], then scatter each row:
	 * diagonal to value[i], off-diagonals after the prefix. */
	iw[0] = n+1;
	for(i=0;i<n;i++)
	{
		iw[i+1] = iw[i+1] + iw[i];
	}
	#ifdef _OPENMP
	#pragma omp parallel private(i,j,k)
	#endif
	{
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<n+1;i++)
		{
			index[i] = iw[i];
		}
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<n;i++)
		{
			k = index[i];
			for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
			{
				jj = Ain->index[j];
				if( jj==i )
				{
					value[i] = Ain->value[j];
				}
				else
				{
					value[k] = Ain->value[j];
					index[k] = Ain->index[j];
					k++;
				}
			}
		}
	}

	err = lis_matrix_set_msr(nnz,ndz,index,value,Aout);
	if( err )
	{
		lis_free2(3,index,value,iw);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_free(iw);
		lis_matrix_storage_destroy(Aout);
		return err;
	}
	lis_free(iw);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_msr2crs"
/* Convert an MSR matrix Ain into CRS storage in Aout.
 * Rows gain a diagonal entry only when the stored MSR diagonal is
 * nonzero (explicit zeros on the diagonal are dropped).
 * returns LIS_SUCCESS or an error code.
 *
 * NOTE(review): 'is' is read from Ain but never used -- appears
 * vestigial; confirm before removing.
 */
LIS_INT lis_matrix_convert_msr2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k;
	LIS_INT err;
	LIS_INT n,nnz,is;
	LIS_INT *ptr,*index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	nnz = Ain->nnz;
	is = Ain->is;

	ptr = NULL;
	index = NULL;
	value = NULL;

	err = lis_matrix_malloc_crs(n,nnz,&ptr,&index,&value);
	if( err )
	{
		return err;
	}

	/* convert crs */
	/* Pass 1: per-row entry counts = off-diagonals plus one slot when a
	 * nonzero diagonal is stored; then prefix-sum into CRS row pointers. */
	#ifdef _OPENMP
	#pragma omp parallel for private(i)
	#endif
	for(i=0;i<n;i++)
	{
		ptr[i+1] = Ain->index[i+1] - Ain->index[i];
		if( Ain->value[i]!=0.0 )
		{
			ptr[i+1]++;
		}
	}
	ptr[0] = 0;
	for(i=0;i<n;i++)
	{
		ptr[i+1] += ptr[i];
	}

	/* Pass 2: emit the diagonal entry first (when present), then the
	 * row's off-diagonal entries in stored order. */
	#ifdef _OPENMP
	#pragma omp parallel for private(i,j,k)
	#endif
	for(i=0;i<n;i++)
	{
		k = ptr[i];
		if( Ain->value[i]!=(LIS_SCALAR)0.0 )
		{
			value[k] = Ain->value[i];
			index[k] = i;
			k++;
		}
		for(j=Ain->index[i];j<Ain->index[i+1];j++)
		{
			value[k] = Ain->value[j];
			index[k] = Ain->index[j];
			k++;
		}
	}

	err = lis_matrix_set_crs(nnz,ptr,index,value,Aout);
	if( err )
	{
		lis_free2(3,ptr,index,value);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
|
/*
* BRKGA.h
*
* This class encapsulates a Biased Random-key Genetic Algorithm (for minimization problems) with K
* independent Populations stored in two vectors of Population, current and previous. It supports
* multi-threading via OpenMP, and implements the following key methods:
*
* - BRKGA() constructor: initializes the populations with parameters described below.
* - evolve() operator: evolve each Population following the BRKGA methodology. This method
* supports OpenMP to evolve up to K independent Populations in parallel.
* Please note that double Decoder::decode(...) MUST be thread-safe.
*
* Required hyperparameters:
* - n: number of genes in each chromosome
* - p: number of elements in each population
* - pe: pct of elite items into each population
* - pm: pct of mutants introduced at each generation into the population
* - rhoe: probability that an offspring inherits the allele of its elite parent
*
* Optional parameters:
* - K: number of independent Populations
* - MAX_THREADS: number of threads to perform parallel decoding -- WARNING: Decoder::decode() MUST
* be thread-safe!
*
* Required templates are:
* RNG: random number generator that implements the methods below.
* - RNG(unsigned long seed) to initialize a new RNG with 'seed'
* - double rand() to return a double precision random deviate in range [0,1)
* - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1)
* - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32
*
* Decoder: problem-specific decoder that implements any of the decode methods outlined below. When
* compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via
* OpenMP), the method must be thread-safe.
* - double decode(const vector< double >& chromosome) const, if you don't want to change
* chromosomes inside the framework, or
* - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome
*
* Created on : Jun 22, 2010 by rtoso
* Last update: Sep 28, 2010 by rtoso
* Authors: Rodrigo Franco Toso <rtoso@cs.rutgers.edu>
*/
#ifndef BRKGA_H
#define BRKGA_H
#include <omp.h>
#include <algorithm>
#include <exception>
#include <stdexcept>
#include "Population.h"
template< class Decoder, class RNG >
class BRKGA {
public:
	/*
	 * Default constructor
	 * Required hyperparameters:
	 * - n: number of genes in each chromosome
	 * - p: number of elements in each population
	 * - pe: pct of elite items into each population
	 * - pm: pct of mutants introduced at each generation into the population
	 * - rhoe: probability that an offspring inherits the allele of its elite parent
	 *
	 * Optional parameters:
	 * - K: number of independent Populations
	 * - MAX_THREADS: number of threads to perform parallel decoding
	 *                WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as
	 *                + double Decoder::decode(std::vector< double >& chromosome) const
	 */
	BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe,
			const Decoder& refDecoder, RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1);

	/**
	 * Destructor; frees the K current and K previous populations.
	 */
	~BRKGA();

	/**
	 * Resets all populations with brand new keys (and re-decodes them).
	 */
	void reset();

	/**
	 * Evolve the current populations following the guidelines of BRKGAs.
	 * @param generations number of generations to run (must be nonzero)
	 */
	void evolve(unsigned generations = 1);

	/**
	 * Exchange elite-solutions between the populations
	 * @param M number of elite chromosomes to select from each population
	 */
	void exchangeElite(unsigned M);

	/**
	 * Set individuals to initial population (only one population in case of multiple ones).
	 * @param chromosomes a set of individuals described as double vectors
	 * between 0 and 1.
	 */
	void setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population);
	void replaceWorst(const std::vector< std::vector< double > >& chromosomes, int population_size, int population);

	/**
	 * Returns the current population
	 */
	const Population& getPopulation(unsigned k = 0) const;

	/**
	 * Returns the chromosome with best fitness so far among all populations
	 */
	const std::vector< double >& getBestChromosome() const;

	/**
	 * Returns the best fitness found so far among all populations
	 */
	double getBestFitness() const;

	// Return copies to the internal parameters:
	unsigned getN() const;
	unsigned getP() const;
	unsigned getPe() const;
	unsigned getPm() const;
	unsigned getPo() const;
	double getRhoe() const;
	unsigned getK() const;
	unsigned getMAX_THREADS() const;

private:
	// Hyperparameters:
	const unsigned n;	// number of genes in the chromosome
	const unsigned p;	// number of elements in the population
	const unsigned pe;	// number of elite items in the population
	const unsigned pm;	// number of mutants introduced at each generation into the population
	const double rhoe;	// probability that an offspring inherits the allele of its elite parent

	// Templates:
	RNG& refRNG;				// reference to the random number generator
	const Decoder& refDecoder;	// reference to the problem-dependent Decoder

	// Parallel populations parameters:
	const unsigned K;				// number of independent parallel populations
	const unsigned MAX_THREADS;		// number of threads for parallel decoding

	// Data (owned raw pointers, released in the destructor):
	std::vector< Population* > previous;	// previous populations
	std::vector< Population* > current;		// current populations

	// Local operations:
	void initialize(const unsigned i);		// initialize current population 'i' with random keys
	void evolution(Population& curr, Population& next);
	bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const;
};
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, double _pe, double _pm, double _rhoe,
		const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) : n(_n), p(_p),
		pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe),
		refRNG(rng), refDecoder(decoder), K(_K), MAX_THREADS(MAX),
		previous(K, 0), current(K, 0) {
	// Members initialize in declaration order, so K is set before the
	// previous/current vectors (which are sized by K) -- keep that order.
	// pe and pm arrive as fractions and are converted to absolute counts above.
	// Error check:
	using std::range_error;
	if(n == 0) { throw range_error("Chromosome size equals zero."); }
	if(p == 0) { throw range_error("Population size equals zero."); }
	if(pe == 0) { throw range_error("Elite-set size equals zero."); }
	if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); }
	if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); }
	if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); }
	if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); }

	// Initialize and decode each chromosome of the current population, then copy to previous:
	for(unsigned i = 0; i < K; ++i) {
		// Allocate:
		current[i] = new Population(n, p);
		// Initialize:
		initialize(i);
		// Then just copy to previous:
		previous[i] = new Population(*current[i]);
	}
}
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::~BRKGA() {
	// Release every population owned by this instance.
	for(unsigned k = 0; k < K; ++k) {
		delete current[k];
		delete previous[k];
	}
}
template< class Decoder, class RNG >
const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const {
	// No bounds check: k must be < K.
	return (*current[k]);
}
template< class Decoder, class RNG >
double BRKGA< Decoder, RNG >::getBestFitness() const {
	// Populations are fitness-sorted, so slot 0 of each holds its best;
	// scan the K populations and keep the smallest (minimization).
	double best = current[0]->fitness[0].first;
	for(unsigned k = 1; k < K; ++k) {
		const double candidate = current[k]->fitness[0].first;
		if(candidate < best) { best = candidate; }
	}
	return best;
}
template< class Decoder, class RNG >
const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const {
	// Find which population currently holds the overall best fitness,
	// then return its front (best) chromosome.
	unsigned winner = 0;
	for(unsigned k = 1; k < K; ++k) {
		if(current[k]->getBestFitness() < current[winner]->getBestFitness()) {
			winner = k;
		}
	}
	return current[winner]->getChromosome(0);
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::reset() {
	// Re-randomize (and re-decode) every population.
	for(unsigned i = 0; i < K; ++i) { initialize(i); }
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::evolve(unsigned generations) {
	if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); }
	// Run each generation over all K populations.
	for(unsigned g = 0; g < generations; ++g) {
		for(unsigned k = 0; k < K; ++k) {
			// Evolve into the 'previous' buffer, then swap the pointers so
			// 'current' refers to the freshly evolved generation.
			evolution(*current[k], *previous[k]);
			std::swap(current[k], previous[k]);
		}
	}
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) {
	if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); }
	// Each population receives M chromosomes from each of the other K-1
	// populations, overwriting its worst members from the back of the list.
	// Guard against 'dest' (unsigned) wrapping below zero and writing out of
	// bounds when the incoming total does not fit.
	if((K - 1) * M >= p) { throw std::range_error("(K-1)*M cannot be >= p."); }
	for(unsigned i = 0; i < K; ++i) {
		// Population i will receive some elite members from each Population j below:
		unsigned dest = p - 1;	// Last chromosome of i (will be updated below)
		for(unsigned j = 0; j < K; ++j) {
			if(j == i) { continue; }
			// Copy the M best of Population j into Population i:
			for(unsigned m = 0; m < M; ++m) {
				// Copy the m-th best of Population j into the 'dest'-th position of Population i:
				const std::vector< double >& bestOfJ = current[j]->getChromosome(m);
				std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin());
				current[i]->fitness[dest].first = current[j]->fitness[m].first;
				--dest;
			}
		}
	}
	// Re-sort every population, since fitness slots were overwritten:
	for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); }
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population) {
	// Copy up to ini_population_size chromosomes into the given population
	// and decode each one so fitness is consistent before sorting.
	// Fixes: the old comparison 'i < ini_population_size' mixed unsigned and
	// int (a negative count became a huge unsigned), and nothing bounded the
	// write index by the population capacity p (out-of-bounds write).
	unsigned limit = (ini_population_size < 0) ? 0u : unsigned(ini_population_size);
	if(limit > p) { limit = p; }
	unsigned i = 0;
	for(std::vector< std::vector< double > >::const_iterator it_chrom = chromosomes.begin();
			it_chrom != chromosomes.end() && i < limit; ++it_chrom, ++i) {
		if(it_chrom->size() != n) {
			throw std::runtime_error("Error on setting initial population: number of genes isn't equal!");
		}
		std::copy(it_chrom->begin(), it_chrom->end(), current[population]->population[i].begin());
		current[population]->setFitness(i, refDecoder.decode((*current[population])(i)) );
	}
	current[population]->sortFitness();
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::replaceWorst(const std::vector< std::vector< double > >& chromosomes, int population_size, int population) {
	// Replace the 'population_size' worst chromosomes of the given population
	// (clamped to [0, p]) with the supplied ones, decoding each replacement.
	// Fixes: the old loop condition 'i > p - population_size' replaced only
	// population_size-1 entries (never slot p-population_size) and the
	// unsigned subtraction wrapped around when population_size >= p.
	unsigned count = (population_size < 0) ? 0u : unsigned(population_size);
	if(count > p) { count = p; }
	unsigned i = p - 1;		// worst chromosome sits at the back
	unsigned replaced = 0;
	for(std::vector< std::vector< double > >::const_iterator it_chrom = chromosomes.begin();
			it_chrom != chromosomes.end() && replaced < count; ++it_chrom, --i, ++replaced) {
		if(it_chrom->size() != n) {
			throw std::runtime_error("Error on setting initial population: number of genes isn't equal!");
		}
		std::copy(it_chrom->begin(), it_chrom->end(), current[population]->population[i].begin());
		current[population]->setFitness(i, refDecoder.decode((*current[population])(i)) );
	}
	current[population]->sortFitness();
}
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) {
	// Fill population i with fresh random keys in [0,1), row by row so the
	// RNG is consumed in the same (chromosome, gene) order as before.
	for(unsigned chrom = 0; chrom < p; ++chrom) {
		for(unsigned gene = 0; gene < n; ++gene) {
			(*current[i])(chrom, gene) = refRNG.rand();
		}
	}
	// Decode every chromosome (in parallel when OpenMP is enabled):
	#ifdef _OPENMP
		#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int j = 0; j < int(p); ++j) {
		current[i]->setFitness(j, refDecoder.decode((*current[i])(j)));
	}
	// Keep the population ordered by fitness:
	current[i]->sortFitness();
}
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) {
	// Build the next generation in 'next' from 'curr':
	//   [0, pe)        copies of the elite,
	//   [pe, p-pm)     crossover offspring,
	//   [p-pm, p)      fresh random mutants.
	// The RNG call order below is part of the reproducible behavior --
	// do not reorder the rand()/randInt() calls.
	// We now will set every chromosome of 'current', iterating with 'i':
	unsigned i = 0;	// Iterate chromosome by chromosome
	unsigned j = 0;	// Iterate allele by allele

	// 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current':
	while(i < pe) {
		for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); }
		// Elite fitness is already known; no re-decode needed for [0, pe).
		next.fitness[i].first = curr.fitness[i].first;
		next.fitness[i].second = i;
		++i;
	}

	// 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm:
	while(i < p - pm) {
		// Select an elite parent:
		const unsigned eliteParent = (refRNG.randInt(pe - 1));
		// Select a non-elite parent:
		const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1));
		// Mate: each allele comes from the elite parent with probability rhoe.
		for(j = 0; j < n; ++j) {
			const unsigned sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent);
			next(i, j) = curr(curr.fitness[sourceParent].second, j);
			//next(i, j) = (refRNG.rand() < rhoe) ? curr(curr.fitness[eliteParent].second, j) :
			//                                      curr(curr.fitness[noneliteParent].second, j);
		}
		++i;
	}

	// We'll introduce 'pm' mutants:
	/*while(i < p) {
		for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); }
		++i;
	}*/
	while(i < p) {
		for(j = 0; j < n; ++j) {
			next(i, j) = refRNG.rand();
		}
		++i;
	}

	// Time to compute fitness, in parallel (only the non-elite part needs
	// decoding; note the loop variable intentionally shadows the outer 'i'):
	#ifdef _OPENMP
		#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int i = int(pe); i < int(p); ++i) {
		next.setFitness( i, refDecoder.decode(next.population[i]) );
	}

	// Now we must sort 'current' by fitness, since things might have changed:
	next.sortFitness();
}
// --- Trivial accessors for the hyperparameters (documented in the class) ---
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getN() const { return n; }	// genes per chromosome
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getP() const { return p; }	// population size
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; }	// elite-set size (absolute)
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; }	// mutant-set size (absolute)
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; }	// offspring count
template< class Decoder, class RNG >
double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; }	// elite-allele inheritance prob.
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getK() const { return K; }	// number of populations
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; }	// decode threads
#endif
|
#ifndef EDGE_MINER_H
#define EDGE_MINER_H
#include <mutex>
#include <numeric>
#include "miner.h"
#include "domain_support.h"
typedef std::pair<unsigned, unsigned> InitPattern;
typedef QuickPattern<EdgeEmbedding, ElementType> QPattern;
typedef CanonicalGraph<EdgeEmbedding, ElementType> CPattern;
typedef std::unordered_map<QPattern, Frequency> QpMapFreq; // quick pattern map (mapping quick pattern to its frequency)
typedef std::unordered_map<CPattern, Frequency> CgMapFreq; // canonical pattern map (mapping canonical pattern to its frequency)
typedef std::map<InitPattern, DomainSupport*> InitMap;
typedef std::unordered_map<QPattern, DomainSupport*> QpMapDomain; // quick pattern map (mapping quick pattern to its domain support)
typedef std::unordered_map<CPattern, DomainSupport*> CgMapDomain; // canonical pattern map (mapping canonical pattern to its domain support)
typedef std::unordered_map<unsigned, unsigned> FreqMap;
typedef std::unordered_map<unsigned, bool> DomainMap;
typedef PerThreadStorage<InitMap> LocalInitMap;
typedef PerThreadStorage<QpMapFreq> LocalQpMapFreq; // PerThreadStorage: thread-local quick pattern map
typedef PerThreadStorage<CgMapFreq> LocalCgMapFreq; // PerThreadStorage: thread-local canonical pattern map
typedef PerThreadStorage<QpMapDomain> LocalQpMapDomain;
typedef PerThreadStorage<CgMapDomain> LocalCgMapDomain;
class EdgeMiner : public Miner {
public:
// Construct a miner over graph g. 'size' bounds the maximum pattern size
// and 'nthreads' sizes the per-thread pattern maps used during aggregation.
EdgeMiner(Graph *g, unsigned size = 3, int nthreads = 1) {
	graph = g;
	max_size = size;
	numThreads = nthreads;
	construct_edgemap();
	// Per-thread storage must match the worker-thread count.
	init_localmaps.set_size(nthreads);
	qp_localmaps.set_size(nthreads);
	cg_localmaps.set_size(nthreads);
}
virtual ~EdgeMiner() {}
// Grow every embedding in emb_list by one edge, writing the next level.
// Two passes: (1) count, per embedding, how many valid (non-automorphic)
// extensions exist; (2) prefix-sum the counts into insertion offsets and
// materialize the extended embeddings at level+1.
void extend_edge(unsigned level, EmbeddingList& emb_list) {
	// Pass 1: count extensions per embedding position.
	UintList num_new_emb(emb_list.size());
	#pragma omp parallel for
	for (size_t pos = 0; pos < emb_list.size(); pos ++) {
		EdgeEmbedding emb(level+1);
		get_embedding(level, pos, emb_list, emb);
		num_new_emb[pos] = 0;
		unsigned n = emb.size();
		std::set<VertexId> vert_set;
		// The vertex set is only needed for automorphism checks on larger embeddings.
		if (n > 3)
			for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
		for (unsigned i = 0; i < n; ++i) {
			VertexId src = emb.get_vertex(i);
			if (emb.get_key(i) == 0) { // TODO: need to fix this
				IndexT row_begin = graph->edge_begin(src);
				IndexT row_end = graph->edge_end(src);
				for (IndexT e = row_begin; e < row_end; e++) {
					IndexT dst = graph->getEdgeDst(e);
					BYTE existed = 0;
					//if (is_frequent_edge[e])
					if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set))
						num_new_emb[pos] ++;
				}
			}
		}
		emb.clean();
	}
	Ulong new_size = std::accumulate(num_new_emb.begin(), num_new_emb.end(), (Ulong)0);
	std::cout << "new_size = " << new_size << "\n";
	assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32
	// Prefix sum turns per-position counts into insertion offsets.
	UintList indices = parallel_prefix_sum(num_new_emb);
	new_size = indices[indices.size()-1];
	emb_list.add_level(new_size);
	// Pass 2: re-enumerate the same extensions and write them at level+1.
	// NOTE(review): this pass iterates emb_list.size(level) while pass 1
	// used emb_list.size() -- presumably both denote the current level's
	// size after add_level; confirm in EmbeddingList.
	#pragma omp parallel for
	for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
		EdgeEmbedding emb(level+1);
		get_embedding(level, pos, emb_list, emb);
		unsigned start = indices[pos];
		unsigned n = emb.size();
		std::set<VertexId> vert_set;
		if (n > 3)
			for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
		for (unsigned i = 0; i < n; ++i) {
			IndexT src = emb.get_vertex(i);
			if (emb.get_key(i) == 0) {
				IndexT row_begin = graph->edge_begin(src);
				IndexT row_end = graph->edge_end(src);
				for (IndexT e = row_begin; e < row_end; e++) {
					IndexT dst = graph->getEdgeDst(e);
					BYTE existed = 0;
					//if (is_frequent_edge[e])
					if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set)) {
						// Record parent position, extended vertex slot, and new vertex.
						emb_list.set_idx(level+1, start, pos);
						emb_list.set_his(level+1, start, i);
						emb_list.set_vid(level+1, start++, dst);
					}
				}
			}
		}
	}
}
// Build the single-edge pattern map (label-pair -> domain support) over the
// whole graph, then count how many single-edge patterns are frequent.
// NOTE(review): the loop is serial yet accumulates into the calling thread's
// local map via getLocal() before merge_init_map() -- looks like it was once
// (or was meant to be) parallel; confirm before adding a pragma.
inline unsigned init_aggregator() {
	init_map.clear();
	for (IndexT src = 0; src < graph->num_vertices(); src ++) {
		InitMap *lmap = init_localmaps.getLocal();
		auto src_label = graph->getData(src);
		IndexT row_begin = graph->edge_begin(src);
		IndexT row_end = graph->edge_end(src);
		for (IndexT e = row_begin; e < row_end; e++) {
			IndexT dst = graph->getEdgeDst(e);
			auto dst_label = graph->getData(dst);
			// Canonical orientation: count each undirected edge once.
			if (src_label <= dst_label) {
				InitPattern key = get_init_pattern(src_label, dst_label);
				if (lmap->find(key) == lmap->end()) {
					(*lmap)[key] = new DomainSupport(2);
					(*lmap)[key]->set_threshold(threshold);
				}
				// Domain 0 collects sources, domain 1 collects destinations.
				(*lmap)[key]->add_vertex(0, src);
				(*lmap)[key]->add_vertex(1, dst);
			}
		}
	}
	merge_init_map();
	std::cout << "Number of single-edge patterns: " << init_map.size() << "\n";
	unsigned count = 0;
	for (auto it = init_map.begin(); it != init_map.end(); ++it)
		if (it->second->get_support()) count ++;
	return count; // return number of frequent single-edge patterns
}
// Aggregate embeddings into quick patterns with domain support, one
// thread-local map per OpenMP thread (merged later by merge_qp_map).
// Also tags each embedding with its quick-pattern id via set_pid.
inline void quick_aggregate(unsigned level, EmbeddingList& emb_list) {
	for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear();
	#pragma omp parallel for
	for (size_t pos = 0; pos < emb_list.size(); pos ++) {
		QpMapDomain *lmap = qp_localmaps.getLocal();
		EdgeEmbedding emb(level+1);
		get_embedding(level, pos, emb_list, emb);
		unsigned n = emb.size();
		QPattern qp(emb, true);
		bool qp_existed = false;
		auto it = lmap->find(qp);
		if (it == lmap->end()) {
			// First occurrence in this thread: allocate its support tracker.
			(*lmap)[qp] = new DomainSupport(n);
			(*lmap)[qp]->set_threshold(threshold);
			emb_list.set_pid(pos, qp.get_id());
		} else {
			qp_existed = true;
			emb_list.set_pid(pos, (it->first).get_id());
		}
		// Add this embedding's vertices to each domain that is still
		// below the support threshold.
		for (unsigned i = 0; i < n; i ++) {
			if ((*lmap)[qp]->has_domain_reached_support(i) == false)
				(*lmap)[qp]->add_vertex(i, emb.get_vertex(i));
		}
		// The map key owns the pattern data when newly inserted; otherwise
		// release this local copy.
		if (qp_existed) qp.clean();
	}
}
// Record the quick-pattern -> canonical-pattern id mapping; the guard
// serializes concurrent insertions into the shared id_map.
void insert_id_map(int qp_id, int cg_id) {
	std::lock_guard<std::mutex> guard(map_mutex);
	id_map.insert(std::make_pair(qp_id, cg_id));
}
// aggregate quick patterns into canonical patterns.
// construct id_map from quick pattern ID (qp_id) to canonical pattern ID (cg_id)
// For each quick pattern: canonicalize it, record the id mapping, then fold
// its domain supports into the canonical pattern's supports using the
// vertex-position equivalences of the canonical form.
void canonical_aggregate() {
	id_map.clear();
	for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear();
	for (std::pair<QPattern, DomainSupport*> element : qp_map) {
		CgMapDomain *lmap = cg_localmaps.getLocal();
		unsigned num_domains = element.first.get_size();
		CPattern cg(element.first);
		int qp_id = element.first.get_id();
		int cg_id = cg.get_id();
		insert_id_map(qp_id, cg_id);
		auto it = lmap->find(cg);
		if (it == lmap->end()) {
			// First time this canonical form is seen by this thread.
			(*lmap)[cg] = new DomainSupport(num_domains);
			(*lmap)[cg]->set_threshold(threshold);
			element.first.set_cgid(cg.get_id());
		} else {
			element.first.set_cgid((it->first).get_id());
		}
		// Map each canonical domain back to the equivalent quick-pattern
		// positions and merge their vertex sets until the threshold is hit.
		VertexPositionEquivalences equivalences;
		element.first.get_equivalences(equivalences);
		for (unsigned i = 0; i < num_domains; i ++) {
			if ((*lmap)[cg]->has_domain_reached_support(i) == false) {
				unsigned qp_idx = cg.get_quick_pattern_index(i);
				assert(qp_idx >= 0 && qp_idx < num_domains);
				UintSet equ_set = equivalences.get_equivalent_set(qp_idx);
				for (unsigned idx : equ_set) {
					DomainSupport *support = element.second;
					if (support->has_domain_reached_support(idx) == false) {
						// Merging may push the domain over the threshold;
						// stop early when it does.
						bool reached_threshold = (*lmap)[cg]->add_vertices(i, support->domain_sets[idx]);
						if (reached_threshold) break;
					} else {
						(*lmap)[cg]->set_domain_frequent(i);
						break;
					}
				}
			}
		}
		cg.clean();
	}
}
// Fold every thread-local single-edge pattern map into the global init_map.
inline void merge_init_map() {
	init_map = *(init_localmaps.getLocal(0));
	for (auto tid = 1; tid < numThreads; tid++) {
		for (auto element : *init_localmaps.getLocal(tid)) {
			DomainSupport *support = element.second;
			if (init_map.find(element.first) == init_map.end()) {
				// First sighting of this pattern: adopt the thread-local support.
				init_map[element.first] = support;
				continue;
			}
			// Pattern already present: merge both vertex domains (0 = src, 1 = dst).
			for (unsigned d = 0; d < 2; d++) {
				if (init_map[element.first]->has_domain_reached_support(d)) continue;
				if (support->has_domain_reached_support(d))
					init_map[element.first]->set_domain_frequent(d);
				else
					init_map[element.first]->add_vertices(d, support->domain_sets[d]);
			}
		}
	}
}
// Merge the per-thread quick-pattern maps (qp_localmaps) into the global
// qp_map. num_domains is the number of vertex positions per pattern.
inline void merge_qp_map(unsigned num_domains) {
qp_map.clear();
qp_map = *(qp_localmaps.getLocal(0));
for (auto i = 1; i < numThreads; i++) {
const QpMapDomain *lmap = qp_localmaps.getLocal(i);
// Pass 1: adopt patterns not yet present globally (shares the pointer).
for (auto element : *lmap) {
if (qp_map.find(element.first) == qp_map.end())
qp_map[element.first] = element.second;
}
// Pass 2: fold this thread's domain support into the global entry.
// The pointer inequality test skips entries adopted in pass 1, which
// would otherwise be merged into themselves and double-counted.
// NOTE(review): the inner loop index shadows the thread index `i`.
for (std::pair<QPattern, DomainSupport*> element : *lmap) {
DomainSupport *support = element.second;
for (unsigned i = 0; i < num_domains; i ++) {
if (!qp_map[element.first]->has_domain_reached_support(i) && qp_map[element.first] != support) {
if (support->has_domain_reached_support(i))
qp_map[element.first]->set_domain_frequent(i);
else qp_map[element.first]->add_vertices(i, support->domain_sets[i]);
}
}
}
}
}
// Merge the per-thread canonical-pattern maps (cg_localmaps) into the
// global cg_map; structure mirrors merge_qp_map above.
inline void merge_cg_map(unsigned num_domains) {
cg_map.clear();
cg_map = *(cg_localmaps.getLocal(0));
for (auto i = 1; i < numThreads; i++) {
const CgMapDomain *lmap = cg_localmaps.getLocal(i);
// Pass 1: adopt locally-discovered patterns missing from the global map.
for (auto element : *lmap) {
if (cg_map.find(element.first) == cg_map.end())
cg_map[element.first] = element.second;
}
// Pass 2: merge per-domain support; skip self-merge of adopted entries.
// NOTE(review): the inner loop index shadows the thread index `i`.
for (std::pair<CPattern, DomainSupport*> element : *lmap) {
DomainSupport *support = element.second;
for (unsigned i = 0; i < num_domains; i ++) {
if (!cg_map[element.first]->has_domain_reached_support(i) && cg_map[element.first] != support) {
if (support->has_domain_reached_support(i))
cg_map[element.first]->set_domain_frequent(i);
else cg_map[element.first]->add_vertices(i, support->domain_sets[i]);
}
}
}
}
}
// Filtering for FSM
#ifdef ENABLE_LABEL
// Remove single-edge embeddings whose label-pair pattern is infrequent,
// compacting emb_list level 1 in place, and mark the surviving edges in
// is_frequent_edge.
inline void init_filter(EmbeddingList& emb_list) {
UintList is_frequent_emb(emb_list.size(), 0);
// Flag embeddings whose initial pattern reached the support threshold.
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
VertexId src = emb_list.get_idx(1, pos);
VertexId dst = emb_list.get_vid(1, pos);
auto src_label = graph->getData(src);
auto dst_label = graph->getData(dst);
InitPattern key = get_init_pattern(src_label, dst_label);
if (init_map[key]->get_support()) is_frequent_emb[pos] = 1;
}
//assert(emb_list.size()*2 == graph->num_edges()); // symmetric graph
is_frequent_edge.resize(graph->num_edges());
std::fill(is_frequent_edge.begin(), is_frequent_edge.end(), 0);
// Mark both directions of each surviving edge; the flags are set with a
// CAS so concurrent writers to the same edge do not race.
// NOTE(review): edge_map::operator[] default-inserts on a missing key;
// assumes construct_edgemap() inserted every (src,dst) pair — otherwise
// the lookups below mutate the map concurrently. TODO confirm.
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
VertexId src = emb_list.get_idx(1, pos);
VertexId dst = emb_list.get_vid(1, pos);
unsigned eid0 = edge_map[OrderedEdge(src,dst)];
unsigned eid1 = edge_map[OrderedEdge(dst,src)];
__sync_bool_compare_and_swap(&is_frequent_edge[eid0], 0, 1);
__sync_bool_compare_and_swap(&is_frequent_edge[eid1], 0, 1);
}
}
std::cout << "Number of frequent edges: " << count(is_frequent_edge.begin(), is_frequent_edge.end(), 1) << "\n";
// Compact the surviving embeddings to the front of level 1: indices[pos]
// is each survivor's new slot (exclusive prefix sum of the flags).
UintList indices = parallel_prefix_sum(is_frequent_emb);
auto vid_list0 = emb_list.get_idx_list(1);
auto vid_list1 = emb_list.get_vid_list(1);
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
VertexId src = vid_list0[pos];
VertexId dst = vid_list1[pos];
unsigned start = indices[pos];
emb_list.set_vid(1, start, dst);
emb_list.set_idx(1, start, src);
}
}
emb_list.remove_tail(indices.back());
}
#endif
// Remove embeddings at `level` whose canonical pattern is infrequent,
// compacting the level's vid/idx/his arrays in place.
inline void filter(unsigned level, EmbeddingList &emb_list) {
UintList is_frequent_emb(emb_list.size(), 0);
// Map each embedding's quick-pattern id to its canonical id and keep it
// only if that canonical pattern reached the support threshold.
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
unsigned qp_id = emb_list.get_pid(pos);
unsigned cg_id = id_map.at(qp_id);
if (domain_support_map.at(cg_id))
is_frequent_emb[pos] = 1;
}
// indices[pos] is each survivor's new slot (prefix sum of the flags).
UintList indices = parallel_prefix_sum(is_frequent_emb);
// Snapshot the level's columns before overwriting them in place.
VertexList vid_list = emb_list.get_vid_list(level);
UintList idx_list = emb_list.get_idx_list(level);
ByteList his_list = emb_list.get_his_list(level);
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
unsigned start = indices[pos];
VertexId vid = vid_list[pos];
IndexTy idx = idx_list[pos];
BYTE his = his_list[pos];
emb_list.set_idx(level, start, idx);
emb_list.set_vid(level, start, vid);
emb_list.set_his(level, start, his);
}
}
emb_list.remove_tail(indices.back());
}
// Set the minimum-support threshold used when deciding pattern frequency.
inline void set_threshold(const unsigned minsup) { threshold = minsup; }
// Print each canonical pattern and its frequency count (frequency-based
// support variant).
inline void printout_agg(const CgMapFreq &cg_map) {
for (auto it = cg_map.begin(); it != cg_map.end(); ++it)
std::cout << "{" << it->first << " --> " << it->second << std::endl;
}
inline void printout_agg() {
std::cout << "num_patterns: " << cg_map.size() << " num_quick_patterns: " << qp_map.size() << "\n";
BoolVec support(cg_map.size());
int i = 0;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
support[i] = it->second->get_support();
i ++;
}
i = 0;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
std::cout << "{" << it->first << " --> " << support[i] << std::endl;
i ++;
}
}
// Rebuild domain_support_map (canonical-pattern id -> reached threshold?)
// and return how many canonical patterns are frequent.
inline unsigned support_count() {
domain_support_map.clear();
unsigned num_frequent = 0;
for (const auto &entry : cg_map) {
const bool frequent = entry.second->get_support();
// insert (not operator[]) keeps the first value on a duplicate id,
// matching the original behavior.
domain_support_map.insert(std::make_pair(entry.first.get_id(), frequent));
if (frequent) num_frequent++;
}
return num_frequent;
}
// construct edge-map for later use. May not be necessary if Galois has this support
void construct_edgemap() {
for (auto src = 0; src < graph->num_vertices(); src ++) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
auto dst = graph->getEdgeDst(e);
OrderedEdge edge(src, dst);
edge_map.insert(std::pair<OrderedEdge, unsigned>(edge, e));
}
}
}
private:
unsigned threshold; // minimum support for a pattern to count as frequent
InitMap init_map; // single-edge label-pair pattern -> support
UintMap id_map; // quick-pattern id -> canonical-pattern id
unsigned max_size; // maximum pattern size to mine — presumably; TODO confirm
int numThreads; // worker-thread count; sizes the per-thread local maps
FreqMap freq_support_map; // frequency-based support map (not used in this chunk — TODO confirm)
DomainMap domain_support_map; // canonical-pattern id -> reached support threshold? (see support_count)
std::map<OrderedEdge, unsigned> edge_map; // (src,dst) -> edge id, built by construct_edgemap()
std::set<std::pair<VertexId,VertexId> > freq_edge_set; // frequent edges (not used in this chunk — TODO confirm)
std::vector<unsigned> is_frequent_edge; // per-edge flag set by init_filter()
LocalInitMap init_localmaps; // initialization map, only used for once, no need to clear
LocalQpMapDomain qp_localmaps; // quick pattern local map for each thread
LocalCgMapDomain cg_localmaps; // canonical pattern local map for each thread
QpMapDomain qp_map; // quick pattern map
CgMapDomain cg_map; // canonical graph map
std::mutex map_mutex; // presumably guards shared-map updates; no use visible in this chunk
// Canonical key for a single-edge pattern: the smaller label is always
// placed first, so (a,b) and (b,a) map to the same InitPattern.
inline InitPattern get_init_pattern(BYTE src_label, BYTE dst_label) {
return (src_label <= dst_label) ? std::make_pair(src_label, dst_label)
: std::make_pair(dst_label, src_label);
}
// Reconstruct the full EdgeEmbedding stored at (level, pos): start from
// the last element and follow the idx back-pointers down one level at a
// time; the final idx is the root vertex id, stored as element 0.
inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EdgeEmbedding &emb) {
VertexId vid = emb_list.get_vid(level, pos);
IndexTy idx = emb_list.get_idx(level, pos);
BYTE his = emb_list.get_his(level, pos);
BYTE lab = graph->getData(vid);
ElementType ele(vid, 0, lab, his);
emb.set_element(level, ele);
// Walk backwards: idx at level L points at the parent row in level L-1.
for (unsigned l = 1; l < level; l ++) {
vid = emb_list.get_vid(level-l, idx);
his = emb_list.get_his(level-l, idx);
lab = graph->getData(vid);
ElementType ele(vid, 0, lab, his);
emb.set_element(level-l, ele);
idx = emb_list.get_idx(level-l, idx);
}
// At level 1 the idx field holds the root vertex id itself.
lab = graph->getData(idx);
ElementType ele0(idx, 0, lab, 0);
emb.set_element(0, ele0);
}
// Fast automorphism test for small embeddings (size <= 3): returns true
// when extending emb with edge (src,dst) would produce a duplicate of an
// embedding already generated under the canonical ordering. `existed` is
// set when dst is already a vertex of the embedding.
bool is_quick_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed) {
// Ordering rules against the first two vertices.
if (dst <= emb.get_vertex(0)) return true;
if (dst == emb.get_vertex(1)) return true;
if (history == 0 && dst < emb.get_vertex(1)) return true;
if (size == 2) {
} else if (size == 3) {
// Pairwise ordering rules against the third vertex, keyed by which
// earlier vertex each edge was extended from (the history field).
if (history == 0 && emb.get_history(2) == 0 && dst <= emb.get_vertex(2)) return true;
if (history == 0 && emb.get_history(2) == 1 && dst == emb.get_vertex(2)) return true;
if (history == 1 && emb.get_history(2) == 1 && dst <= emb.get_vertex(2)) return true;
if (dst == emb.get_vertex(2)) existed = 1;
//if (!existed && max_size < 4) return true;
} else {
// Larger embeddings must use is_edge_automorphism instead.
std::cout << "Error: should go to detailed check\n";
}
return false;
}
// General automorphism test: returns true when adding edge (src,dst) to
// emb would duplicate an embedding already produced under the canonical
// edge ordering. Delegates to the quick check for size < 3.
bool is_edge_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed, const std::set<VertexId>& vertex_set) {
if (size < 3) return is_quick_automorphism(size, emb, history, src, dst, existed);
// check with the first element
if (dst <= emb.get_vertex(0)) return true;
if (history == 0 && dst <= emb.get_vertex(1)) return true;
// check loop edge
if (dst == emb.get_vertex(emb.get_history(history))) return true;
if (vertex_set.find(dst) != vertex_set.end()) existed = 1;
// check to see if there already exists the vertex added;
// if so, just allow to add edge which is (smaller id -> bigger id)
if (existed && src > dst) return true;
// The new edge must compare greater than every edge added after the
// attachment point, otherwise it was (or will be) generated elsewhere.
std::pair<VertexId, VertexId> added_edge(src, dst);
for (unsigned index = history + 1; index < emb.size(); ++index) {
std::pair<VertexId, VertexId> edge;
edge.first = emb.get_vertex(emb.get_history(index));
edge.second = emb.get_vertex(index);
//assert(edge.first != edge.second);
int cmp = compare(added_edge, edge);
if(cmp <= 0) return true;
}
return false;
}
// Order a vertex pair in place so that pair.first <= pair.second.
inline void swap(std::pair<VertexId, VertexId>& pair) {
if (pair.first > pair.second)
std::swap(pair.first, pair.second);
}
// Canonically order both edges in place, then compare lexicographically;
// returns <0, 0 or >0 like strcmp.
// NOTE(review): the result is produced by subtraction — if VertexId is an
// unsigned type the difference wraps before converting to int; verify
// VertexId is signed or ids stay small enough.
inline int compare(std::pair<VertexId, VertexId>& oneEdge, std::pair<VertexId, VertexId>& otherEdge) {
swap(oneEdge);
swap(otherEdge);
if(oneEdge.first == otherEdge.first) return oneEdge.second - otherEdge.second;
else return oneEdge.first - otherEdge.first;
}
};
#endif // EDGE_MINER_HPP_
|
k_clique_count_set_based.h | #pragma once
#include <gms/representations/graphs/set_graph.h>
// Count cliques extending the current partial clique. `isect` holds the
// candidates adjacent to every vertex chosen so far; pick each v, refine
// the candidate set with N(v), and recurse for (k-1)-cliques.
// Base case k == 1: each remaining candidate completes one clique.
template <class SGraph, class Set>
size_t RecursiveStepCliqueCount(SGraph& graph, const size_t k, const Set &isect) {
if (k == 1)
return isect.cardinality();
assert(k > 1);
size_t current = 0;
for (auto vi : isect) {
auto cur_isect = isect.intersect(graph.out_neigh(vi));
// Prune: a further (k-1)-clique needs at least k-2 mutually adjacent
// candidates in the refined set.
if (cur_isect.cardinality() >= k - 2)
current += RecursiveStepCliqueCount(graph, k - 1, cur_isect);
}
return current;
}
// Count all k-cliques in g (default k = 4): convert to a set-based graph
// representation, then root the recursive count at every vertex in
// parallel. NOTE(review): template parameters Set and Set2 are not used
// directly in this body — presumably required by callers; confirm.
template <typename Set, typename SGraph, typename Set2>
size_t CliqueCount(CSRGraph &g, size_t k = 4) {
size_t n = g.num_nodes();
SGraph set_graph = SGraph::FromCGraph(g);
size_t total = 0;
#pragma omp parallel for reduction(+ : total) schedule(dynamic, 64)
for (NodeId u = 0; u < n; ++u){
total += RecursiveStepCliqueCount(set_graph, k - 1, set_graph.out_neigh(u));
}
std::cout << "total " << k << "-cliques: " << total << std::endl;
return total;
}
axpy.c | // Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Liao 1/15/2013
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/timeb.h>
#define NUM_RUNS 10
/* Wall-clock time in milliseconds.
 * Replaces the obsolete ftime()/<sys/timeb.h> interface (marked
 * obsolescent and then removed from POSIX) with C11 timespec_get();
 * resolution also improves from 1 ms to the platform clock's. */
double read_timer_ms() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec * 1000.0 + (double) ts.tv_nsec / 1.0e6;
}
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
/* Set all n entries of A to zero. */
void zero(REAL *A, int n)
{
    REAL *p = A;
    REAL *end = A + n;
    while (p < end)
        *p++ = 0.0;
}
/* initialize a vector with random floating point numbers */
/* Fill A[0..n-1] with pseudo-random values from drand48(). */
void init(REAL *A, int n)
{
    REAL *p;
    for (p = A; p < A + n; p++)
        *p = (double)drand48();
}
/*serial version */
/* Serial reference kernel: y = a*x + y.
 * Fix: the loop index was `int i` against `long n`; for n > INT_MAX the
 * counter overflows (signed overflow is UB). Use a `long` index. */
void axpy(REAL* x, REAL* y, long n, REAL a) {
    long i;
    for (i = 0; i < n; i++)
    {
        y[i] += a * x[i];
    }
}
/* compare two arrays and return percentage of difference */
/* Relative difference between A and B: sum|A-B| / sum|B|.
 * Fix: guard the division — for n == 0 or an all-zero reference vector B
 * the original returned 0/0 = NaN; report the absolute difference sum
 * instead (0.0 when the vectors are identical). */
REAL check(REAL*A, REAL*B, int n)
{
    int i;
    REAL diffsum = 0.0, sum = 0.0;
    for (i = 0; i < n; i++) {
        diffsum += fabs(A[i] - B[i]);
        sum += fabs(B[i]);
    }
    if (sum == 0.0) return diffsum;
    return diffsum/sum;
}
/* Offloaded axpy: inside one `omp target` region, applies y += a*x three
 * times (two `parallel for` kernels plus one serial loop) — so one call
 * performs three accumulations, not one. NOTE(review): `n` is int here
 * but `long` in the serial axpy(); confirm callers stay below INT_MAX. */
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
int i;
/* //implementation of the following omp target region
#pragma omp target teams distribute parallel for device (0) map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
*/
/* Map y in and out; x is read-only on the device. */
#pragma omp target map(tofrom: y[0:n]) map(to: x[0:n])
{
printf("Parallel Kernel 1\n");
#pragma omp parallel for
for (i = 0; i < n; i++)
y[i] += a * x[i];
printf("Parallel Kernel 2\n");
#pragma omp parallel for
for (i = 0; i < n; i++)
y[i] += a * x[i];
printf("Serial Kernel\n");
for (i = 0; i < n; i++)
y[i] += a * x[i];
}
}
/* Driver: run the serial reference once on y, time NUM_RUNS offloaded
 * runs on y_ompacc, and report the relative difference and mean time. */
int main(int argc, char *argv[])
{
int n;
REAL *y_ompacc, *y, *x;
REAL a = 123.456;
n = 1 << 23; // 2^23, 8 million
fprintf(stderr, "Usage: axpy <n>, where the problem size is 2^n.\n");
if (argc >= 2) {
n = 1 << atoi(argv[1]);
}
y_ompacc = (REAL *) malloc(n * sizeof(REAL));
y = (REAL *) malloc(n * sizeof(REAL));
x = (REAL *) malloc(n * sizeof(REAL));
/* ~192 MB total at the default size; the original dereferenced the
   pointers unchecked. */
if (y_ompacc == NULL || y == NULL || x == NULL) {
fprintf(stderr, "axpy: out of memory\n");
free(y_ompacc);
free(y);
free(x);
return 1;
}
srand48(1<<12);
init(x, n);
init(y_ompacc, n);
memcpy(y, y_ompacc, n*sizeof(REAL));
axpy(x, y, n, a); /* serial reference result in y */
int i;
double elapsed = read_timer_ms();
/* Fix: the original ran axpy_ompacc on y (the serial result), leaving
   y_ompacc untouched, so check() compared the accelerated output with
   unprocessed data. Run the offloaded kernel on y_ompacc.
   NOTE: each call applies y += a*x three times and is repeated
   NUM_RUNS times, so the two vectors still diverge by design; the
   strict assert below remains disabled. */
for (i = 0; i < NUM_RUNS; i++)
axpy_ompacc(x, y_ompacc, n, a);
elapsed = (read_timer_ms() - elapsed)/NUM_RUNS;
REAL checkresult = check(y_ompacc, y, n);
fprintf(stderr, "axpy(%d): checksum: %g, time: %0.2fms\n", n, checkresult, elapsed);
//assert (checkresult < 1.0e-10);
printf("%g", elapsed);
free(y_ompacc);
free(y);
free(x);
return 0;
}
|
GB_binop__rdiv_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp32)
// A*D function (colscale): GB (_AxD__rdiv_fp32)
// D*A function (rowscale): GB (_DxB__rdiv_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp32)
// C=scalar+B GB (_bind1st__rdiv_fp32)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp32)
// C=A+scalar GB (_bind2nd__rdiv_fp32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y / x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the loop body comes from the
// template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense.
void GB (_Cdense_ewise3_noaccum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C. Returns GrB_NO_VALUE when the
// operator is compiled out (GB_DISABLE), so the generic path is used.
GrB_Info GB (_Cdense_accumB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C.
GrB_Info GB (_Cdense_accumb__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// Unreachable (the block above always returns) — generated-code quirk.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by diagonal matrix D.
GrB_Info GB (_AxD__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by diagonal matrix D.
GrB_Info GB (_DxB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked); for eWiseUnion, alpha/beta give
// the values used where only one of A or B has an entry.
GrB_Info GB (_AaddB__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are read only on the eWiseUnion path.
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with C sparse/hypersparse.
GrB_Info GB (_AemultB_08__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. The flipxy branch
// is compiled only when the op has no flipped variant (GB_BINOP_FLIP);
// rdiv was created by flipping div, so the simpler branch is used here.
GrB_Info GB (_AemultB_02__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper, A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with C stored as bitmap (optionally masked/complemented).
GrB_Info GB (_AemultB_bitmap__rdiv_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): bind the scalar as the first operand. For rdiv
// (z = y/x) this computes Cx[p] = Bx[p] / x.
GrB_Info GB (_bind1st__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (bij / x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): bind the scalar as the second operand. For rdiv
// (z = y/x) this computes Cx[p] = y / Ax[p].
GrB_Info GB (_bind2nd__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (y / aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / x) ; \
}
// C = op(x, A'): transpose A and apply the operator with the scalar bound
// first, via GB_CAST_OP defined just above.
GrB_Info GB (_bind1st_tran__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the rest of the file (generated-code pattern).
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y / aij) ; \
}
// C = op(A', y): transpose A and apply the operator with the scalar bound
// second, via GB_CAST_OP defined just above.
GrB_Info GB (_bind2nd_tran__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
IJVector_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJVector_Par interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJVectorCreatePar
*
* creates ParVector if necessary, and leaves a pointer to it as the
* hypre_IJVector object
*
*****************************************************************************/
/* Create the underlying ParVector for an IJVector, converting the
 * (possibly one-based) IJ partitioning to the zero-based partitioning
 * the ParVector object expects. Under HYPRE_NO_GLOBAL_PARTITION only the
 * local [first, last+1) range is stored (2 entries); otherwise the full
 * num_procs+1 partition array is copied. */
HYPRE_Int
hypre_IJVectorCreatePar(hypre_IJVector *vector,
HYPRE_BigInt *IJpartitioning)
{
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_Int num_procs, j;
HYPRE_BigInt global_n, *partitioning, jmin;
hypre_MPI_Comm_size(comm, &num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
jmin = hypre_IJVectorGlobalFirstRow(vector);
global_n = hypre_IJVectorGlobalNumRows(vector);
partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
/* Shift to zero-based partitioning for ParVector object */
for (j = 0; j < 2; j++)
partitioning[j] = IJpartitioning[j] - jmin;
#else
jmin = IJpartitioning[0];
global_n = IJpartitioning[num_procs] - jmin;
partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
/* Shift to zero-based partitioning for ParVector object */
for (j = 0; j < num_procs+1; j++)
partitioning[j] = IJpartitioning[j] - jmin;
#endif
/* The ParVector takes ownership of `partitioning`. */
hypre_IJVectorObject(vector) =
hypre_ParVectorCreate(comm, global_n, (HYPRE_BigInt *) partitioning);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDestroyPar
*
* frees ParVector local storage of an IJVectorPar
*
*****************************************************************************/
/* Free the ParVector storage held by the IJVector. */
HYPRE_Int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
return hypre_ParVectorDestroy((hypre_ParVector*)hypre_IJVectorObject(vector));
}
/******************************************************************************
*
* hypre_IJVectorInitializePar
*
* initializes ParVector of IJVectorPar
*
*****************************************************************************/
/* Initialize the ParVector of an IJVector: size the local vector from
 * the partitioning, allocate its data, and (re)initialize the auxiliary
 * vector used to stage off-processor values. */
HYPRE_Int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
HYPRE_BigInt *partitioning = hypre_ParVectorPartitioning(par_vector);
hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
HYPRE_Int my_id;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_MPI_Comm_rank(comm,&my_id);
if (!partitioning)
{
if (print_level)
{
hypre_printf("No ParVector partitioning for initialization -- ");
hypre_printf("hypre_IJVectorInitializePar\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Local size: entries this rank owns according to the partitioning. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
hypre_VectorSize(local_vector) = (HYPRE_Int)(partitioning[1] - partitioning[0]);
#else
hypre_VectorSize(local_vector) = (HYPRE_Int)(partitioning[my_id+1] - partitioning[my_id]);
#endif
hypre_ParVectorInitialize(par_vector);
/* Create the translator lazily on first initialization. */
if (!aux_vector)
{
hypre_AuxParVectorCreate(&aux_vector);
hypre_IJVectorTranslator(vector) = aux_vector;
}
hypre_AuxParVectorInitialize(aux_vector);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetMaxOffProcElmtsPar
*
*****************************************************************************/
/* Preset the maximum number of off-processor elements on the auxiliary
 * vector, creating the translator if it does not exist yet. */
HYPRE_Int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts)
{
hypre_AuxParVector *aux_vector;
aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
if (!aux_vector)
{
hypre_AuxParVectorCreate(&aux_vector);
hypre_IJVectorTranslator(vector) = aux_vector;
}
hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDistributePar
*
* takes an IJVector generated for one processor and distributes it
* across many processors according to vec_starts,
* if vec_starts is NULL, it distributes them evenly?
*
*****************************************************************************/
/* Redistribute a single-processor IJVector across the communicator
 * according to vec_starts (NULL means distribute evenly), replacing the
 * old ParVector with the distributed one. */
HYPRE_Int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
const HYPRE_Int *vec_starts)
{
hypre_ParVector *old_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_ParVector *par_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
if (!old_vector)
{
if (print_level)
{
hypre_printf("old_vector == NULL -- ");
hypre_printf("hypre_IJVectorDistributePar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
hypre_ParVectorLocalVector(old_vector),
(HYPRE_BigInt *)vec_starts);
/* NOTE(review): on failure the error is recorded but execution falls
 * through, destroying old_vector and storing NULL in the IJVector —
 * presumably intentional (hypre error-flag convention); confirm. */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorDistributePar\n");
hypre_printf("**** Vector storage is unallocated ****\n");
}
hypre_error_in_arg(1);
}
hypre_ParVectorDestroy(old_vector);
hypre_IJVectorObject(vector) = par_vector;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorZeroValuesPar
*
* zeroes all local components of an IJVectorPar
*
*****************************************************************************/
/* Zero all locally-owned components of the IJVector, after validating
 * that the ParVector, its partitioning and its local data exist. */
HYPRE_Int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
HYPRE_Int my_id;
HYPRE_Int i;
HYPRE_BigInt vec_start, vec_stop;
HYPRE_Complex *data;
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
HYPRE_BigInt *partitioning;
hypre_Vector *local_vector;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
partitioning = hypre_ParVectorPartitioning(par_vector);
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!partitioning)
{
if (print_level)
{
hypre_printf("partitioning == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Locally-owned global index range [vec_start, vec_stop). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = partitioning[0];
vec_stop = partitioning[1];
#else
vec_start = partitioning[my_id];
vec_stop = partitioning[my_id+1];
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorZeroValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
data = hypre_VectorData( local_vector );
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < (HYPRE_Int)(vec_stop - vec_start); i++)
data[i] = 0.;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetValuesPar
*
* sets a potentially noncontiguous set of components of an IJVectorPar
*
*****************************************************************************/
/* Overwrite num_values components of the local part of the vector.
 * If indices != NULL, only indices falling in the locally owned (inclusive)
 * range [vec_start, vec_stop] are written; off-processor indices are
 * silently skipped (see the NOTE below).  If indices == NULL, a contiguous
 * block of num_values entries starting at the local origin is written.
 * Returns 0 when num_values < 1, otherwise hypre_error_flag. */
HYPRE_Int
hypre_IJVectorSetValuesPar(hypre_IJVector *vector,
HYPRE_Int num_values,
const HYPRE_BigInt *indices,
const HYPRE_Complex *values)
{
HYPRE_Int my_id;
HYPRE_Int j, k;
/* i, vec_start, vec_stop are HYPRE_BigInt: global indices may exceed
 * the range of HYPRE_Int in 64-bit builds. */
HYPRE_BigInt i, vec_start, vec_stop;
HYPRE_Complex *data;
HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_Vector *local_vector;
/* If no components are to be set, perform no checking and return */
if (num_values < 1) return 0;
hypre_MPI_Comm_rank(comm, &my_id);
/* If par_vector == NULL or partitioning == NULL or local_vector == NULL
let user know of catastrophe and exit */
if (!par_vector)
{
if (print_level)
{
hypre_printf("par_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
local_vector = hypre_ParVectorLocalVector(par_vector);
if (!IJpartitioning)
{
if (print_level)
{
hypre_printf("IJpartitioning == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!local_vector)
{
if (print_level)
{
hypre_printf("local_vector == NULL -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Note the -1: unlike ZeroValues/GetValues, this function uses an
 * INCLUSIVE upper bound vec_stop for the locally owned index range. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
vec_start = IJpartitioning[0];
vec_stop = IJpartitioning[1]-1;
#else
vec_start = IJpartitioning[my_id];
vec_stop = IJpartitioning[my_id+1]-1;
#endif
if (vec_start > vec_stop)
{
if (print_level)
{
hypre_printf("vec_start > vec_stop -- ");
hypre_printf("hypre_IJVectorSetValuesPar\n");
hypre_printf("**** This vector partitioning should not occur ****\n");
}
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* Determine whether indices points to local indices only, and if not, store
indices and values in auxiliary vector structure. If indices == NULL,
assume that num_values components are to be set in a block starting at
vec_start. NOTE: If indices == NULL off proc values are ignored!!! */
data = hypre_VectorData(local_vector);
if (indices)
{
/* NOTE(review): off-processor indices are skipped here without warning,
 * unlike AddToValues which stashes them for Assemble -- confirm this
 * asymmetry is intended by the IJ API contract. */
for (j = 0; j < num_values; j++)
{
i = indices[j];
if (i >= vec_start && i <= vec_stop)
{
/* k is the 0-based local offset of global index i. */
k = (HYPRE_Int)( i- vec_start);
data[k] = values[j];
}
}
}
else
{
/* Contiguous block: clamp num_values to the local size, warning once. */
if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
{
if (print_level)
{
hypre_printf("Warning! Indices beyond local range not identified!\n ");
hypre_printf("Off processor values have been ignored!\n");
}
num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_values; j++)
data[j] = values[j];
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAddToValuesPar
*
* adds to a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAddToValuesPar(hypre_IJVector *vector,
                             HYPRE_Int num_values,
                             const HYPRE_BigInt *indices,
                             const HYPRE_Complex *values)
{
   /* Add values[j] into component indices[j] of the vector.  Locally owned
    * components (inclusive range [vec_start, vec_stop]) are updated in
    * place; off-processor entries are appended to the auxiliary vector's
    * off-proc stash and communicated later by hypre_IJVectorAssemblePar.
    * If indices == NULL, a contiguous block starting at the local origin is
    * updated and off-processor values are ignored (with a warning).
    * Returns 0 when num_values < 1, otherwise hypre_error_flag. */
   HYPRE_Int my_id;
   HYPRE_Int j;
   /* Bug fix: i, vec_start, vec_stop were declared HYPRE_Int, silently
    * truncating 64-bit global indices when HYPRE_BigInt is wider.  Use
    * HYPRE_BigInt, matching hypre_IJVectorSetValuesPar/GetValuesPar. */
   HYPRE_BigInt i, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Inclusive local range (note the -1), as in SetValuesPar. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[my_id];
   vec_stop = IJpartitioning[my_id+1]-1;
#endif
   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);
   if (indices)
   {
      /* NOTE(review): aux_vector is assumed non-NULL whenever an
       * off-processor index appears -- the IJ interface creates the
       * translator during Initialize; confirm callers cannot reach here
       * without it. */
      HYPRE_Int current_num_elmts
         = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      HYPRE_Int max_off_proc_elmts
         = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_BigInt *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
      HYPRE_Int k;

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off
               processor stash */
            if (!max_off_proc_elmts)
            {
               /* First off-proc entry: allocate the stash. */
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
                  max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               /* Stash full: grow it and publish the new pointers. */
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector)
                  = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            off_proc_i[current_num_elmts] = i;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentNumElmts(aux_vector)=current_num_elmts;
         }
         else /* local values are added to the vector */
         {
            k = (HYPRE_Int)(i - vec_start);
            data[k] += values[j];
         }
      }
   }
   else
   {
      /* Contiguous block: clamp num_values to the local size, warning once. */
      if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         data[j] += values[j];
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAssemblePar
*
* currently tests existence of of ParVector object and its partitioning
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
   /* Finalize the vector: validate the ParVector object and both
    * partitionings, then (collectively) flush any off-processor entries
    * stashed by hypre_IJVectorAddToValuesPar to their owning ranks.
    * Returns hypre_error_flag. */
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_BigInt *partitioning;
   MPI_Comm comm = hypre_IJVectorComm(vector);
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      /* Bug fix: return immediately -- the original fell through and
       * dereferenced the NULL par_vector in hypre_ParVectorPartitioning. */
      return hypre_error_flag;
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      /* Return on error, consistent with the other IJVector routines. */
      return hypre_error_flag;
   }
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (aux_vector)
   {
      HYPRE_Int off_proc_elmts, current_num_elmts;
      HYPRE_Int max_off_proc_elmts;
      HYPRE_BigInt *off_proc_i;
      HYPRE_Complex *off_proc_data;

      current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      /* Bug fix: the source was corrupted to "¤t_num_elmts" (mojibake
       * of "&current_num_elmts" -- an HTML "&curren;" entity artifact),
       * which does not compile.  Restore the address-of expression. */
      hypre_MPI_Allreduce(&current_num_elmts, &off_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (off_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
         off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
         hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
                                              current_num_elmts, off_proc_i, off_proc_data);
         /* Stash has been communicated; release and reset it. */
         hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector), HYPRE_MEMORY_HOST);
         hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector), HYPRE_MEMORY_HOST);
         hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
         hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorGetValuesPar
*
* get a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorGetValuesPar(hypre_IJVector *vector,
                           HYPRE_Int num_values,
                           const HYPRE_BigInt *indices,
                           HYPRE_Complex *values)
{
   /* Copy num_values components of the vector into values[].  All requested
    * indices must be locally owned (half-open range [vec_start, vec_stop));
    * any out-of-range index is an error (arg 3).  If indices == NULL, a
    * contiguous block of num_values entries starting at the local origin is
    * returned.  Returns 0 when num_values < 1, otherwise hypre_error_flag. */
   HYPRE_Int my_id;
   HYPRE_Int j, k;
   HYPRE_BigInt i, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int ierr = 0;
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Half-open local range (vec_stop is EXCLUSIVE here, unlike
      Set/AddToValues which subtract 1 for an inclusive bound). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1];
#else
   vec_start = IJpartitioning[my_id];
   vec_stop = IJpartitioning[my_id+1];
#endif
   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Determine whether indices points to local indices only, and if not, let
      user know of catastrophe and exit. If indices == NULL, assume that
      num_values components are to be retrieved from block starting at
      vec_start */
   if (indices)
   {
      for (i = 0; i < num_values; i++)
      {
         ierr += (indices[i] < vec_start);
         ierr += (indices[i] >= vec_stop);
      }
   }
   if (ierr)
   {
      if (print_level)
      {
         hypre_printf("indices beyond local range -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Indices specified are unusable ****\n");
      }
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);
   if (indices)
   {
#ifdef HYPRE_USING_OPENMP
      /* Bug fix: the clause was private(i,j), leaving k SHARED while every
         iteration writes it -- a data race that can scatter wrong values
         under OpenMP.  k must be private; i is not used in this loop. */
#pragma omp parallel for private(j,k) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
      {
         k = (HYPRE_Int)(indices[j] - vec_start);
         values[j] = data[k];
      }
   }
   else
   {
      /* Contiguous block must fit entirely within the local part. */
      if (num_values > (HYPRE_Int)(vec_stop-vec_start))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         values[j] = data[j];
   }
   return hypre_error_flag;
}
/******************************************************************************
* hypre_IJVectorAssembleOffProcValsPar
*
* This is for handling set and get values calls to off-proc. entries - it is
* called from assemble. There is an alternate version for when the assumed
* partition is being used.
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
/* Flush off-processor (index, value) pairs stashed during AddToValues to
 * their owning ranks and add them into the owners' local data.  Collective
 * over comm.  max_off_proc_elmts is the stash capacity (unused here; kept
 * for interface symmetry with the assumed-partition variant).
 * Plan: (1) map each stashed index to its owner, (2) exchange send counts
 * via Allgather/Allgatherv, (3) pack per-destination buffers, (4) two
 * nonblocking exchange rounds (indices, then values), (5) accumulate. */
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_BigInt *off_proc_i,
HYPRE_Complex *off_proc_data)
{
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_ParVector *par_vector = ( hypre_ParVector *) hypre_IJVectorObject(vector);
hypre_MPI_Request *requests = NULL;
hypre_MPI_Status *status = NULL;
HYPRE_Int i, j, j2;
HYPRE_Int iii, indx, ip;
HYPRE_BigInt row, first_index;
HYPRE_Int proc_id, num_procs, my_id;
HYPRE_Int num_sends, num_sends2;
HYPRE_Int num_recvs;
HYPRE_Int num_requests;
HYPRE_Int vec_start, vec_len;
HYPRE_Int *send_procs;
HYPRE_BigInt *send_i;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_procs;
HYPRE_BigInt *recv_i;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *info;
HYPRE_Int *int_buffer;
HYPRE_Int *proc_id_mem;
HYPRE_BigInt *partitioning;
HYPRE_Int *displs;
HYPRE_Int *recv_buf;
HYPRE_Complex *send_data;
HYPRE_Complex *recv_data;
HYPRE_Complex *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
partitioning = hypre_IJVectorPartitioning(vector);
/* first_index: first global index owned by this rank. */
first_index = partitioning[my_id];
/* info[p] counts how many stashed entries are owned by rank p. */
info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
proc_id_mem = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
for (i=0; i < current_num_elmts; i++)
{
row = off_proc_i[i];
proc_id = hypre_FindProc(partitioning,row,num_procs);
proc_id_mem[i] = proc_id;
info[proc_id]++;
}
/* determine send_procs and amount of data to be sent */
num_sends = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
num_sends++;
}
}
/* int_buffer holds (destination, count) pairs, hence 2*num_sends. */
num_sends2 = 2*num_sends;
send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
int_buffer = hypre_CTAlloc(HYPRE_Int, num_sends2, HYPRE_MEMORY_HOST);
j = 0;
j2 = 0;
send_map_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
if (info[i])
{
send_procs[j++] = i;
send_map_starts[j] = send_map_starts[j-1]+info[i];
int_buffer[j2++] = i;
int_buffer[j2++] = info[i];
}
}
/* All ranks publish their (destination, count) pair lists; each rank then
   scans the concatenation to discover who will send to it. */
hypre_MPI_Allgather(&num_sends2,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
displs[0] = 0;
for (i=1; i < num_procs+1; i++)
displs[i] = displs[i-1]+info[i-1];
recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(int_buffer,num_sends2,HYPRE_MPI_INT,recv_buf,info,displs,
HYPRE_MPI_INT,comm);
hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(info, HYPRE_MEMORY_HOST);
/* determine recv procs and amount of data to be received */
num_recvs = 0;
for (j=0; j < displs[num_procs]; j+=2)
{
if (recv_buf[j] == my_id)
num_recvs++;
}
recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
j2 = 0;
recv_vec_starts[0] = 0;
for (i=0; i < num_procs; i++)
{
/* Pairs in recv_buf[displs[i]..displs[i+1]) were published by rank i. */
for (j=displs[i]; j < displs[i+1]; j+=2)
{
if (recv_buf[j] == my_id)
{
recv_procs[j2++] = i;
recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];
}
if (j2 == num_recvs) break;
}
}
hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
hypre_TFree(displs, HYPRE_MEMORY_HOST);
/* set up data to be sent to send procs */
/* send_i contains for each send proc
indices, send_data contains corresponding values */
send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_data = hypre_CTAlloc(HYPRE_Complex, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
recv_data = hypre_CTAlloc(HYPRE_Complex, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
/* Pack pass: send_map_starts[indx] is used as a moving cursor into the
   destination's segment; it is shifted back down afterwards. */
for (i=0; i < current_num_elmts; i++)
{
proc_id = proc_id_mem[i];
indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
iii = send_map_starts[indx];
send_i[iii] = off_proc_i[i];
send_data[iii] = off_proc_data[i];
send_map_starts[indx]++;
}
hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);
/* Undo the cursor advance: shift entries up by one and restore the 0. */
for (i=num_sends; i > 0; i--)
{
send_map_starts[i] = send_map_starts[i-1];
}
send_map_starts[0] = 0;
num_requests = num_recvs+num_sends;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* Round 1: exchange the global indices. */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
/* Round 2: exchange the values (requests/status arrays are reused). */
j=0;
for (i=0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i+1] - vec_start;
ip = recv_procs[i];
hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
for (i=0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i+1] - vec_start;
ip = send_procs[i];
hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
ip, 0, comm, &requests[j++]);
}
if (num_requests)
{
hypre_MPI_Waitall(num_requests, requests, status);
}
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
/* Accumulate received contributions into the local data. */
for (i=0; i < recv_vec_starts[num_recvs]; i++)
{
row = recv_i[i];
j = (HYPRE_Int)(row - first_index);
data[j] += recv_data[i];
}
hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
hypre_TFree(recv_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
#else
/* assumed partition version */
/* Assumed-partition variant: flush stashed off-processor (index, value)
 * pairs to their owners without a global partitioning array.  Owners are
 * discovered by contacting assumed-partition ranks via
 * hypre_DataExchangeList, then packed (count, row, value, ...) records are
 * shipped to the real owners and accumulated.  Collective over comm.
 * max_off_proc_elmts is unused here (interface symmetry). */
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_BigInt *off_proc_i,
HYPRE_Complex *off_proc_data)
{
HYPRE_Int myid;
HYPRE_BigInt global_first_row, global_num_rows;
HYPRE_Int i, j, in, k;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_Int ex_num_contacts = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_count;
HYPRE_Int num_recvs;
HYPRE_Int counter;
HYPRE_BigInt upper_bound;
HYPRE_Int num_real_procs;
HYPRE_BigInt *row_list=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL;
HYPRE_Int tmp_int;
HYPRE_Int obj_size_bytes, big_int_size, complex_size;
HYPRE_Int first_index;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_BigInt *ex_contact_buf=NULL;
HYPRE_Complex *vector_data;
HYPRE_Complex value;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
MPI_Comm comm = hypre_IJVectorComm(vector);
hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_rows = hypre_IJVectorGlobalNumRows(vector);
global_first_row = hypre_IJVectorGlobalFirstRow(vector);
/* verify that we have created the assumed partition */
if (hypre_IJVectorAssumedPart(vector) == NULL)
{
hypre_IJVectorCreateAssumedPartition(vector);
}
apart = (hypre_IJAssumedPart*) hypre_IJVectorAssumedPart(vector);
/* get the assumed processor id for each row */
a_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
row_list = hypre_CTAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
if (current_num_elmts > 0)
{
for (i=0; i < current_num_elmts; i++)
{
row = off_proc_i[i];
row_list[i] = row;
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_rows, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, current_num_elmts -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < current_num_elmts; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact
assumed processors and find out who the actual row owner is - we
will contact with a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i=0; i< current_num_elmts; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_row,
global_num_rows, &range_start, &range_end);
}
}
/*finish the starts*/
ex_contact_vec_starts[counter] = counter*2;
/*finish the last range*/
if (counter > 0)
ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
/* Exchange round 1: ask assumed owners which REAL ranks own our ranges. */
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 4,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by an upper bound for the
range. */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
a_proc_id = NULL;
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = (HYPRE_Int)response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < current_num_elmts && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real procesors ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int and HYPRE_Complex data. (row number and value) - we will send
everything as a void since we may not know the rel sizes of ints and
doubles */
/* first find out how many elements to send per proc - so we can do
storage */
complex_size = sizeof(HYPRE_Complex);
big_int_size = sizeof(HYPRE_BigInt);
/* Each record slot is padded to the larger of the two element sizes so
   rows and values can be interleaved in one untyped buffer. */
obj_size_bytes = hypre_max(big_int_size, complex_size);
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < current_num_elmts; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
}
}
}
/* calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
/* Starts are stored NEGATED (offset by 1) so the packing loop below can
   tell "first visit to this processor" apart from subsequent visits. */
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2* num_rows_per_proc[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf_d contains #rows, row #, data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order */
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
for (i=0; i < current_num_elmts; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
prev_id = -1;
for (i=0; i < current_num_elmts; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you loose the negative signs that differentiate
add/set values */
row = off_proc_i[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in < 0)
{
in = -in - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt,1 , HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
/* add value */
tmp_complex = off_proc_data[i];
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in++;
/* increment the indexes to keep track of where we are - fix later */
ex_contact_vec_starts[indx] = in;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
/* The packing loop advanced each start past its segment; shift down to
   restore proper exclusive-prefix starts. */
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* now get the info in send_proc_obj_d */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate inital storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id = NULL; /* don't care who sent it to us */
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
/* Exchange round 2: ship the packed (count, row, value, ...) records to
   the real owners; received records land in send_proc_obj. */
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 5,
comm, (void **) &response_buf, &response_buf_starts);
/***********************************/
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and either set or add to the
vector data */
num_recvs = send_proc_obj.length;
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
first_index = hypre_ParVectorFirstIndex(par_vector);
for (i=0; i < num_recvs; i++)
{
indx = recv_starts[i];
/* get the number of rows for this recv */
hypre_TMemcpy( &row_count, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < row_count; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* value */
hypre_TMemcpy( &value, recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* NOTE(review): local offset subtracts BOTH first_index and
   global_first_row -- presumably first_index here is relative to
   global_first_row; confirm against hypre_ParVectorFirstIndex. */
k = (HYPRE_Int)(row - first_index - global_first_row);
vector_data[k] += value;
}
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
#endif
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2021, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#include "facedetection_export.h"
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image!
/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support a UINT8*INT8 dot product,
//so convert the input data to the range [0, 127]
//and then use the INT8*INT8 dot product instead.
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif
//#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
//#elif defined(_ENABLE_AVX2)
// #define _MALLOC_ALIGN 256
// #else
// #define _MALLOC_ALIGN 128
// #endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2)
#error Cannot enable the two of AVX512 and AVX2 at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
// One detected face: confidence score and bounding box in image pixels.
typedef struct FaceRect_
{
float score;  // detection confidence
int x;        // left of the bounding box
int y;        // top of the bounding box
int w;        // box width
int h;        // box height
int lm[10];   // presumably 5 facial landmarks as (x, y) pairs — TODO confirm against detection_output
}FaceRect;
// Plain parameter record describing one convolution layer; consumed by
// Filters<T>::operator= which copies the weight/bias data out of it.
typedef struct ConvInfoStruct_ {
int channels;       // input channels per filter
int num_filters;    // number of output filters
bool is_depthwise;  // 3x3 depth-wise convolution
bool is_pointwise;  // 1x1 point-wise convolution
bool with_relu;     // apply ReLU after the convolution
float* pWeights;    // weight data; only float is supported in this version
float* pBiases;     // bias data, one per filter
}ConvInfoStruct;
// Rows x cols x channels data container used by the CNN layers. Each
// (row, col) cell stores `channels` elements of T, padded so every cell
// starts on a _MALLOC_ALIGN/8-byte boundary (channelStep, in bytes).
// NOTE(review): owns `data` via myAlloc/myFree but declares no copy
// constructor or copy assignment, so copying a CDataBlob would double-free
// the buffer — confirm blobs are only ever passed by reference.
template <typename T>
class CDataBlob
{
public:
T * data;         // owned buffer; allocated by create(), released by setNULL()
int rows;
int cols;
int channels; //in element
int channelStep; //in byte, per (row, col) cell, includes alignment padding
public:
// Default: empty blob; call create() before use.
CDataBlob() {
data = 0;
rows = 0;
cols = 0;
channels = 0;
channelStep = 0;
}
// Allocates immediately; contents are NOT zeroed (see create()).
CDataBlob(int r, int c, int ch)
{
data = 0;
create(r, c, ch);
//#warning "confirm later"
//setZero();
}
~CDataBlob()
{
setNULL();
}
// Releases the buffer (if any) and resets all dimensions to zero.
void setNULL()
{
if (data)
myFree(&data);
rows = cols = channels = channelStep = 0;
}
// Zeroes the whole buffer, including the alignment padding bytes
// (channelStep is per-cell bytes, so the product is the full size).
void setZero()
{
if(data)
memset(data, 0, channelStep * rows * cols);
}
inline bool isEmpty()
{
return (rows <= 0 || cols <= 0 || channels == 0 || data == NULL);
}
// (Re)allocates storage for r x c cells of ch elements each. channelStep
// is ch*sizeof(T) rounded up to the next multiple of _MALLOC_ALIGN/8 bytes
// so SIMD loads of one cell stay within the aligned stride.
// Returns false on allocation failure.
bool create(int r, int c, int ch)
{
setNULL();
rows = r;
cols = c;
channels = ch;
//alloc space for int8 array
int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
this->channelStep = channels * sizeof(T);
else
this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
data = (T*)myAlloc(size_t(rows) * cols * this->channelStep);
if (data == NULL)
{
cerr << "Failed to alloc memeory for uint8 data blob: "
<< rows << "*"
<< cols << "*"
<< channels << endl;
return false;
}
//NOTE(review): the buffer (including the padding bytes) is NOT
//initialized here; callers that rely on zeroed padding must call
//setZero() explicitly (as setDataFrom3x3S2P1to1x1S1P0FromImage does).
return true;
}
// Pointer to the channel vector of cell (r, c); NULL when out of range.
// channelStep is divided back by sizeof(T) because `data` is typed.
inline T * ptr(int r, int c)
{
if( r < 0 || r >= this->rows || c < 0 || c >= this->cols )
return NULL;
return (this->data + (size_t(r) * this->cols + c) * this->channelStep /sizeof(T));
}
// Repacks a BGR image as the input of a fused 3x3-stride-2-pad-1
// convolution viewed as 1x1-stride-1: output cell (r, c) holds the 3x3
// window around source pixel (2r, 2c), with the three image channels at
// offsets 0, 9 and 18 (27 of the 32 allocated channels are used).
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(float) != typeid(T))
{
cerr << "DataBlob must be float in the current version." << endl;
return false;
}
if (imgChannels != 3)
{
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
//only 27 elements used for each pixel; channel count rounded up to 32
create((imgHeight+1)/2, (imgWidth+1)/2, 32);
//since the pixel assignment cannot fill all the elements in the blob.
//some elements in the blob should be initialized to 0
setZero();
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->rows; r++)
{
for (int c = 0; c < this->cols; c++)
{
T * pData = this->ptr(r, c);
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) ; //3x3 filters, 3-channel image
pData[output_channel_offset] = (pImgData[0]);
pData[output_channel_offset+9] = (pImgData[1]);
pData[output_channel_offset+18] = (pImgData[2]);
}
}
}
}
return true;
}
// Bounds-checked element read; returns 0 for any out-of-range access.
inline T getElement(int r, int c, int ch)
{
if (this->data)
{
if (r >= 0 && r < this->rows &&
c >= 0 && c < this->cols &&
ch >= 0 && ch < this->channels)
{
T * p = this->ptr(r, c);
return (p[ch]);
}
}
return (T)(0);
}
// Prints the blob size, followed by every element when the blob is small.
friend ostream &operator<<(ostream &output, CDataBlob &dataBlob)
{
output << "DataBlob Size (channels, rows, cols) = ("
<< dataBlob.channels
<< ", " << dataBlob.rows
<< ", " << dataBlob.cols
<< ")" << endl;
if( dataBlob.rows * dataBlob.cols * dataBlob.channels <= 16)
{ //print the elements only when the total number is at most 16
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int r = 0; r < dataBlob.rows; r++)
{
output << "(";
for (int c = 0; c < dataBlob.cols; c++)
{
T * p = dataBlob.ptr(r, c);
if(sizeof(T)<4)
output << (int)(p[ch]); //print narrow types as numbers, not chars
else
output << p[ch];
if (c != dataBlob.cols - 1)
output << ", ";
}
output << ")" << endl;
}
}
}
else
//NOTE(review): the closing ")" of this summary line is missing.
output << "(" << dataBlob.getElement(0,0,0) << ", ..., "
<< dataBlob.getElement(dataBlob.rows-1, dataBlob.cols-1, dataBlob.channels-1)
<< endl;
return output;
}
};
// Weights and biases of one convolution layer (either 1x1 point-wise or
// 3x3 depth-wise), loaded from a plain ConvInfoStruct.
template <typename T>
class Filters{
public:
int channels;
int num_filters;
bool is_depthwise;
bool is_pointwise;
bool with_relu;
CDataBlob<T> weights;  // point-wise: 1 x num_filters x channels; depth-wise: 1 x 9 x channels
CDataBlob<T> biases;   // 1 x 1 x num_filters
Filters()
{
channels = 0;
num_filters = 0;
is_depthwise = false;
is_pointwise = false;
with_relu = true;
}
// Copies the layer parameters plus weight/bias data out of convinfo.
// Only T == float is supported; on any error the object is returned
// unchanged and the problem is reported to cerr (callers get no error code).
Filters & operator=(ConvInfoStruct & convinfo)
{
if (typeid(float) != typeid(T))
{
cerr << "The data type must be float in this version." << endl;
return *this;
}
if (typeid(float*) != typeid(convinfo.pWeights) ||
typeid(float*) != typeid(convinfo.pBiases))
{
cerr << "The data type of the filter parameters must be float in this version." << endl;
return *this;
}
this->channels = convinfo.channels;
this->num_filters = convinfo.num_filters;
this->is_depthwise = convinfo.is_depthwise;
this->is_pointwise = convinfo.is_pointwise;
this->with_relu = convinfo.with_relu;
if(!this->is_depthwise && this->is_pointwise) //1x1 point wise
{
this->weights.create(1, num_filters, channels);
}
else if(this->is_depthwise && !this->is_pointwise) //3x3 depth wise
{
this->weights.create(1, 9, channels);
}
else
{
cerr << "Unsupported filter type. Only 1x1 point-wise and 3x3 depth-wise are supported." << endl;
return *this;
}
this->biases.create(1, 1, num_filters);
//convinfo.pWeights must be laid out as weights.cols contiguous groups of
//`channels` floats, matching the blob layout created above.
for(int fidx = 0; fidx < this->weights.cols; fidx++)
memcpy(this->weights.ptr(0,fidx),
convinfo.pWeights + channels * fidx ,
channels * sizeof(T));
memcpy(this->biases.ptr(0,0), convinfo.pBiases, sizeof(T) * this->num_filters);
return *this;
}
};
bool convolution(CDataBlob<float> & inputData, Filters<float> & filters, CDataBlob<float> & outputData, bool do_relu = true);
bool convolutionDP(CDataBlob<float> & inputData,
Filters<float> & filtersP, Filters<float> & filtersD,
CDataBlob<float> & outputData, bool do_relu = true);
bool convolution4layerUnit(CDataBlob<float> & inputData,
Filters<float> & filtersP1, Filters<float> & filtersD1,
Filters<float> & filtersP2, Filters<float> & filtersD2,
CDataBlob<float> & outputData, bool do_relu = true);
bool maxpooling2x2S2(CDataBlob<float> &inputData, CDataBlob<float> &outputData);
template<typename T>
bool concat4(CDataBlob<T> &inputData1, CDataBlob<T> &inputData2, CDataBlob<T> &inputData3, CDataBlob<T> &inputData4, CDataBlob<T> &outputData);
bool priorbox( int feature_width, int feature_height,
int img_width, int img_height,
int step, int num_sizes,
float * pWinSizes, CDataBlob<float> & outputData);
bool softmax1vector2class(CDataBlob<float> &inputOutputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(CDataBlob<T> &inputData, CDataBlob<T> & outputData);
bool softmax1vector2class(CDataBlob<float> &inputOutputData);
bool clamp1vector(CDataBlob<float> &inputOutputData);
bool detection_output(CDataBlob<float> & priorbox,
CDataBlob<float> & loc,
CDataBlob<float> & conf,
//CDataBlob<float> & iou,
float overlap_threshold,
float confidence_threshold,
int top_k,
int keep_top_k,
CDataBlob<float> & outputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
|
openmp-ex33.c | /* Although OpenMP emphasizes data parallelism, there are also constructs for
* instruction parallelism */
#include <stdio.h>
#include <omp.h>
/*
 * Demonstrates instruction-level parallelism with OpenMP sections: one
 * "server" section polls until both "client" sections have written their
 * thread ids into the shared clients[] array.
 *
 * NOTE(review): the sections must run concurrently. With fewer than 3
 * threads the server section can be scheduled first and spin forever,
 * since the client sections never get a chance to run.
 *
 * Returns 0 on success.
 */
int main(void)
{
    int clients[2] = {-1, -1};
    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        #pragma omp sections
        {
            #pragma omp section
            {
                int found[2] = {0, 0};
                printf ("I am %d and I am the server.\n",id);
                while (1) {
                    int i;
                    /* Fix: without a flush, the OpenMP memory model does not
                     * guarantee this thread ever observes the clients[]
                     * writes made by the other sections, so the loop could
                     * spin forever on a stale value. */
                    #pragma omp flush
                    for (i = 0; i < 2; i++) {
                        if (!found[i] && clients[i] >= 0) {
                            found[i] = 1;
                            printf("Thread %d has checked in as client %d\n",clients[i],i);
                        }
                    }
                    if (found[0] && found[1]) break;
                }
            }
            #pragma omp section
            {
                printf("I am %d and I am client 0.\n",id);
                clients[0] = id;
                /* Fix: publish the write promptly to the polling server. */
                #pragma omp flush
            }
            #pragma omp section
            {
                printf("I am %d and I am client 1.\n",id);
                clients[1] = id;
                #pragma omp flush
            }
        }
    }
    return 0;
}
|
lap.h | #include <cassert>
#include <cstdio>
#include <limits>
#include <memory>
#include <vector>
#include "knn.h"
#ifdef __GNUC__
#define always_inline __attribute__((always_inline)) inline
#define restrict __restrict__
#elif _WIN32
#define always_inline __forceinline
#define restrict __restrict
#else
#define always_inline inline
#define restrict
#endif
#undef max
#define INF 1e6
// Scalar scan of row i of the cost matrix: returns the smallest and the
// second-smallest reduced costs (cost - v) along with their column indices.
// The second index is -1 when the row has a single column (dim == 1).
template <typename idx, typename cost>
always_inline std::tuple<cost, cost, idx, idx>
find_umins_plain(
    idx dim, idx i, const cost *restrict assign_cost,
    const cost *restrict v) {
  const cost *row = assign_cost + i * dim;
  cost best = row[0] - v[0];
  cost second = std::numeric_limits<cost>::max();
  idx best_j = 0;
  idx second_j = -1;
  for (idx j = 1; j < dim; j++) {
    const cost reduced = row[j] - v[j];
    if (reduced >= second) {
      continue;  // cannot improve either minimum.
    }
    if (reduced < best) {
      // New global minimum: previous best becomes the runner-up.
      second = best;
      second_j = best_j;
      best = reduced;
      best_j = j;
    } else {
      // Improves only the runner-up.
      second = reduced;
      second_j = j;
    }
  }
  return std::make_tuple(best, second, best_j, second_j);
}
// MSVC++ has an awful AVX2 support which does not allow to compile the code
#if defined(__AVX2__) && !defined(_WIN32)
#include <immintrin.h>
#define FLOAT_MIN_DIM 64
#define DOUBLE_MIN_DIM 100000 // 64-bit code is actually always slower
// AVX2 specialization for float: same contract as find_umins_plain — returns
// (min, submin, argmin, arg-submin) of the reduced costs of row i.
// Strategy: track per-lane minima over 8-wide chunks, then reduce the 8
// lanes with two scalar passes, then handle the dim % 8 tail scalarly.
template <typename idx>
always_inline std::tuple<float, float, idx, idx>
find_umins(
idx dim, idx i, const float *restrict assign_cost,
const float *restrict v) {
// small rows are faster with the scalar scan.
if (dim < FLOAT_MIN_DIM) {
return find_umins_plain(dim, i, assign_cost, v);
}
const float *local_cost = assign_cost + i * dim;
__m256i idxvec = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
__m256i j1vec = _mm256_set1_epi32(-1), j2vec = _mm256_set1_epi32(-1);
__m256 uminvec = _mm256_set1_ps(std::numeric_limits<float>::max()),
usubminvec = _mm256_set1_ps(std::numeric_limits<float>::max());
for (idx j = 0; j < dim - 7; j += 8) {
__m256 acvec = _mm256_loadu_ps(local_cost + j);
__m256 vvec = _mm256_loadu_ps(v + j);
__m256 h = _mm256_sub_ps(acvec, vvec);
// lanes where h becomes the new minimum: demote old min to submin.
__m256 cmp = _mm256_cmp_ps(h, uminvec, _CMP_LE_OQ);
usubminvec = _mm256_blendv_ps(usubminvec, uminvec, cmp);
// NOTE(review): reinterpret_cast between __m256 and __m256i is a
// GCC/Clang vector-extension idiom; standard intrinsics would use
// _mm256_castps_si256 — confirm target compilers accept this.
j2vec = _mm256_blendv_epi8(
j2vec, j1vec, reinterpret_cast<__m256i>(cmp));
uminvec = _mm256_blendv_ps(uminvec, h, cmp);
j1vec = _mm256_blendv_epi8(
j1vec, idxvec, reinterpret_cast<__m256i>(cmp));
// lanes where h only beats the submin (but not the min).
cmp = _mm256_andnot_ps(cmp, _mm256_cmp_ps(h, usubminvec, _CMP_LT_OQ));
usubminvec = _mm256_blendv_ps(usubminvec, h, cmp);
j2vec = _mm256_blendv_epi8(
j2vec, idxvec, reinterpret_cast<__m256i>(cmp));
idxvec = _mm256_add_epi32(idxvec, _mm256_set1_epi32(8));
}
// spill the 8 per-lane candidates and reduce them scalarly.
alignas(__m256) float uminmem[8], usubminmem[8];
alignas(__m256) int32_t j1mem[8], j2mem[8];
_mm256_store_ps(uminmem, uminvec);
_mm256_store_ps(usubminmem, usubminvec);
_mm256_store_si256(reinterpret_cast<__m256i*>(j1mem), j1vec);
_mm256_store_si256(reinterpret_cast<__m256i*>(j2mem), j2vec);
idx j1 = -1, j2 = -1;
float umin = std::numeric_limits<float>::max(),
usubmin = std::numeric_limits<float>::max();
// pass 1: fold the per-lane minima (they carry candidate indices).
for (int vi = 0; vi < 8; vi++) {
float h = uminmem[vi];
if (h < usubmin) {
idx jnew = j1mem[vi];
if (h >= umin) {
usubmin = h;
j2 = jnew;
} else {
usubmin = umin;
umin = h;
j2 = j1;
j1 = jnew;
}
}
}
// pass 2: per-lane subminima can only improve the runner-up.
for (int vi = 0; vi < 8; vi++) {
float h = usubminmem[vi];
if (h < usubmin) {
usubmin = h;
j2 = j2mem[vi];
}
}
// scalar tail for the last dim % 8 columns.
for (idx j = dim & 0xFFFFFFF8u; j < dim; j++) {
float h = local_cost[j] - v[j];
if (h < usubmin) {
if (h >= umin) {
usubmin = h;
j2 = j;
} else {
usubmin = umin;
umin = h;
j2 = j1;
j1 = j;
}
}
}
return std::make_tuple(umin, usubmin, j1, j2);
}
// AVX2 specialization for double: identical structure to the float version
// but with 4-wide lanes (see DOUBLE_MIN_DIM — the vector path rarely wins
// for 64-bit elements, so the threshold is very high).
template <typename idx>
always_inline std::tuple<double, double, idx, idx>
find_umins(
idx dim, idx i, const double *restrict assign_cost,
const double *restrict v) {
if (dim < DOUBLE_MIN_DIM) {
return find_umins_plain(dim, i, assign_cost, v);
}
const double *local_cost = assign_cost + i * dim;
__m256i idxvec = _mm256_setr_epi64x(0, 1, 2, 3);
__m256i j1vec = _mm256_set1_epi64x(-1), j2vec = _mm256_set1_epi64x(-1);
__m256d uminvec = _mm256_set1_pd(std::numeric_limits<double>::max()),
usubminvec = _mm256_set1_pd(std::numeric_limits<double>::max());
for (idx j = 0; j < dim - 3; j += 4) {
__m256d acvec = _mm256_loadu_pd(local_cost + j);
__m256d vvec = _mm256_loadu_pd(v + j);
__m256d h = _mm256_sub_pd(acvec, vvec);
// lanes where h becomes the new minimum: demote old min to submin.
__m256d cmp = _mm256_cmp_pd(h, uminvec, _CMP_LE_OQ);
usubminvec = _mm256_blendv_pd(usubminvec, uminvec, cmp);
j2vec = _mm256_blendv_epi8(
j2vec, j1vec, reinterpret_cast<__m256i>(cmp));
uminvec = _mm256_blendv_pd(uminvec, h, cmp);
j1vec = _mm256_blendv_epi8(
j1vec, idxvec, reinterpret_cast<__m256i>(cmp));
// lanes where h only beats the submin (but not the min).
cmp = _mm256_andnot_pd(cmp, _mm256_cmp_pd(h, usubminvec, _CMP_LT_OQ));
usubminvec = _mm256_blendv_pd(usubminvec, h, cmp);
j2vec = _mm256_blendv_epi8(
j2vec, idxvec, reinterpret_cast<__m256i>(cmp));
idxvec = _mm256_add_epi64(idxvec, _mm256_set1_epi64x(4));
}
// spill the 4 per-lane candidates and reduce them scalarly.
alignas(__m256d) double uminmem[4], usubminmem[4];
alignas(__m256d) int64_t j1mem[4], j2mem[4];
_mm256_store_pd(uminmem, uminvec);
_mm256_store_pd(usubminmem, usubminvec);
_mm256_store_si256(reinterpret_cast<__m256i*>(j1mem), j1vec);
_mm256_store_si256(reinterpret_cast<__m256i*>(j2mem), j2vec);
idx j1 = -1, j2 = -1;
double umin = std::numeric_limits<double>::max(),
usubmin = std::numeric_limits<double>::max();
// pass 1: fold the per-lane minima (they carry candidate indices).
for (int vi = 0; vi < 4; vi++) {
double h = uminmem[vi];
if (h < usubmin) {
idx jnew = j1mem[vi];
if (h >= umin) {
usubmin = h;
j2 = jnew;
} else {
usubmin = umin;
umin = h;
j2 = j1;
j1 = jnew;
}
}
}
// pass 2: per-lane subminima can only improve the runner-up.
for (int vi = 0; vi < 4; vi++) {
double h = usubminmem[vi];
if (h < usubmin) {
usubmin = h;
j2 = j2mem[vi];
}
}
// scalar tail for the last dim % 4 columns.
for (idx j = dim & 0xFFFFFFFCu; j < dim; j++) {
double h = local_cost[j] - v[j];
if (h < usubmin) {
if (h >= umin) {
usubmin = h;
j2 = j;
} else {
usubmin = umin;
umin = h;
j2 = j1;
j1 = j;
}
}
}
return std::make_tuple(umin, usubmin, j1, j2);
}
#else // __AVX__
#define find_umins find_umins_plain
#endif // __AVX__
/// @brief Jonker-Volgenant algorithm.
/// @param dim in problem size
/// @param assign_cost in cost matrix
/// @param verbose in indicates whether to report the progress to stdout
/// @param rowsol out column assigned to row in solution / size dim
/// @param colsol out row assigned to column in solution / size dim
/// @param u out dual variables, row reduction numbers / size dim
/// @param v out dual variables, column reduction numbers / size dim
/// @return achieved minimum assignment cost
/// @brief Jonker-Volgenant linear assignment on a kNN-sparsified copy of the
///        cost matrix (column reduction, reduction transfer, then Dijkstra
///        augmentation for each remaining free row).
/// @param dim in problem size
/// @param _assign_cost in dense cost matrix, dim x dim, row major
/// @param verbose in indicates whether to report the progress to stdout
/// @param rowsol out column assigned to row in solution / size dim
/// @param colsol out row assigned to column in solution / size dim
/// @param u out dual variables, row reduction numbers / size dim
/// @param v out dual variables, column reduction numbers / size dim
/// @param k_value in number of nearest entries per row kept by knn_sparse
/// @return achieved minimum assignment cost
/// NOTE(review): the working copy is float, and `const cost *` pointers
/// alias it below, so `cost` must effectively be float — confirm callers.
template <typename idx, typename cost>
cost lap(int dim, const cost *restrict _assign_cost, bool verbose,
         idx *restrict rowsol, idx *restrict colsol,
         cost *restrict u, cost *restrict v, int k_value) {
  // Work on a mutable float copy so the matrix can be sparsified in place.
  float *assign_cost = new float[dim * dim];
  for (int i = 0; i < dim * dim; i++) {
    assign_cost[i] = _assign_cost[i];
  }
  // Keep only the k_value nearest entries per row; the rest become ~0.
  knn_sparse(assign_cost, dim, dim, k_value, false, 0);
  // Mark the dropped (near-zero) entries as unusable.
  // NOTE(review): genuine costs below 1e-6 are also wiped here — confirm the
  // cost scale guarantees real entries are larger.
  for (int i = 0; i < dim * dim; i++) {
    if (assign_cost[i] < 1e-6) {
      assign_cost[i] = INF;
    }
  }
  auto free = std::unique_ptr<idx[]>(new idx[dim]);      // list of unassigned rows.
  auto collist = std::unique_ptr<idx[]>(new idx[dim]);   // list of columns to be scanned in various ways.
  auto matches = std::unique_ptr<idx[]>(new idx[dim]);   // counts how many times a row could be assigned.
  auto d = std::unique_ptr<cost[]>(new cost[dim]);       // 'cost-distance' in augmenting path calculation.
  auto pred = std::unique_ptr<idx[]>(new idx[dim]);      // row-predecessor of column in augmenting/alternating path.
  auto row_connected = std::unique_ptr<std::vector<idx>[]>(new std::vector<idx>[dim]);  // columns connected to row
  auto collist_ptr = std::unique_ptr<idx[]>(new idx[dim]);  // position of each column inside collist.
  // init how many times a row will be assigned in the column reduction.
#if _OPENMP >= 201307
#pragma omp simd
#endif
  for (idx i = 0; i < dim; i++) {
    matches[i] = 0;
  }
  // COLUMN REDUCTION
  for (idx j = dim - 1; j >= 0; j--) {  // reverse order gives better results.
    // find minimum cost over rows; also record the sparsity pattern.
    cost min = std::numeric_limits<cost>::max();
    idx imin = -1;
    for (idx i = 0; i < dim; i++) {
      const cost *local_cost = &assign_cost[i * dim];
      if (local_cost[j] < INF) {
        row_connected[i].push_back(j);
        if (local_cost[j] < min) {
          min = local_cost[j];
          imin = i;
        }
      }
    }
    v[j] = min;
    // NOTE(review): if a column has no usable row, imin stays -1 and
    // matches[imin] writes out of bounds — confirm k_value keeps every
    // column reachable.
    if (++matches[imin] == 1) {
      // init assignment if minimum row assigned for first time.
      rowsol[imin] = j;
      colsol[j] = imin;
    } else {
      colsol[j] = -1;  // row already assigned, column not assigned.
    }
  }
  if (verbose) {
    printf("lapjv: COLUMN REDUCTION finished\n");
  }
  // REDUCTION TRANSFER
  idx numfree = 0;
  for (idx i = 0; i < dim; i++) {
    const cost *local_cost = &assign_cost[i * dim];
    if (matches[i] == 0) {  // fill list of unassigned 'free' rows.
      free[numfree++] = i;
    } else if (matches[i] == 1) {  // transfer reduction from rows that are assigned once.
      idx j1 = rowsol[i];
      cost min = std::numeric_limits<cost>::max();
      // only the sparse set of columns connected to this row needs scanning.
      idx len = row_connected[i].size();
      for (idx id_j = 0; id_j < len; id_j++) {
        idx j = row_connected[i][id_j];
        if (j != j1) {
          if (local_cost[j] - v[j] < min) {
            min = local_cost[j] - v[j];
          }
        }
      }
      v[j1] = v[j1] - min;
    }
  }
  if (verbose) {
    printf("lapjv: REDUCTION TRANSFER finished\n");
  }
  // AUGMENT SOLUTION for each free row.
  for (idx f = 0; f < numfree; f++) {
    idx endofpath;
    idx freerow = free[f];  // start row of augmenting path.
    if (verbose) {
      printf("lapjv: AUGMENT SOLUTION row %d [%d / %d]\n",
             freerow, f + 1, numfree);
    }
    // Dijkstra shortest path algorithm.
    // runs until unassigned column added to shortest path tree.
#if _OPENMP >= 201307
#pragma omp simd
#endif
    for (idx j = 0; j < dim; j++) {
      d[j] = assign_cost[freerow * dim + j] - v[j];
      pred[j] = freerow;
      collist[j] = j;      // init column list.
      collist_ptr[j] = j;  // inverse permutation of collist.
    }
    idx low = 0;  // columns in 0..low-1 are ready, now none.
    idx up = 0;   // columns in low..up-1 are to be scanned for current minimum, now none.
    // columns in up..dim-1 are to be considered later to find new minimum,
    // at this stage the list simply contains all columns
    bool unassigned_found = false;
    // initialized in the first iteration: low == up == 0
    idx last = 0;
    cost min = 0;
    do {
      if (up == low) {  // no more columns to be scanned for current minimum.
        last = low - 1;
        // scan columns for up..dim-1 to find all indices for which new minimum occurs.
        // store these indices between low..up-1 (increasing up).
        min = d[collist[up++]];
        for (idx k = up; k < dim; k++) {
          idx j = collist[k];
          cost h = d[j];
          if (h <= min) {
            if (h < min) {  // new minimum.
              up = low;     // restart list at index low.
              min = h;
            }
            // new index with same minimum, put on index up, and extend list;
            // collist_ptr is kept in sync so columns can be found by value.
            collist_ptr[j] = up;
            collist_ptr[collist[up]] = k;
            collist[k] = collist[up];
            collist[up++] = j;
          }
        }
        // check if any of the minimum columns happens to be unassigned.
        // if so, we have an augmenting path right away.
        for (idx k = low; k < up; k++) {
          if (colsol[collist[k]] < 0) {
            endofpath = collist[k];
            unassigned_found = true;
            break;
          }
        }
      }
      if (!unassigned_found) {
        // update 'distances' between freerow and all unscanned columns, via next scanned column.
        idx j1 = collist[low];
        low++;
        idx i = colsol[j1];
        const cost *local_cost = &assign_cost[i * dim];
        cost h = local_cost[j1] - v[j1] - min;
        // only the sparse set of columns connected to row i can improve.
        idx len = row_connected[i].size();
        for (idx id_j = 0; id_j < len; id_j++) {
          idx j = row_connected[i][id_j];
          idx k = collist_ptr[j];
          if (k < low) continue;  // column already finalized.
          cost v2 = local_cost[j] - v[j] - h;
          if (v2 < d[j]) {
            pred[j] = i;
            if (v2 == min) {  // new column found at same minimum value
              if (colsol[j] < 0) {
                // if unassigned, shortest augmenting path is complete.
                endofpath = j;
                unassigned_found = true;
                break;
              } else {  // else add to list to be scanned right away.
                collist_ptr[j] = up;
                collist_ptr[collist[up]] = k;
                collist[k] = collist[up];
                collist[up++] = j;
              }
            }
            d[j] = v2;
          }
        }
      }
    } while (!unassigned_found);
    // update column prices.
#if _OPENMP >= 201307
#pragma omp simd
#endif
    for (idx k = 0; k <= last; k++) {
      idx j1 = collist[k];
      v[j1] = v[j1] + d[j1] - min;
    }
    // reset row and column assignments along the alternating path.
    {
      idx i;
      do {
        i = pred[endofpath];
        colsol[endofpath] = i;
        idx j1 = endofpath;
        endofpath = rowsol[i];
        rowsol[i] = j1;
      } while (i != freerow);
    }
  }
  if (verbose) {
    printf("lapjv: AUGMENT SOLUTION finished\n");
  }
  // calculate optimal cost.
  cost lapcost = 0;
#if _OPENMP >= 201307
#pragma omp simd reduction(+:lapcost)
#endif
  for (idx i = 0; i < dim; i++) {
    const cost *local_cost = &assign_cost[i * dim];
    idx j = rowsol[i];
    u[i] = local_cost[j] - v[j];
    lapcost += local_cost[j];
  }
  if (verbose) {
    printf("lapjv: optimal cost calculated\n");
  }
  delete[] assign_cost;  // fix: this working copy was previously leaked.
  return lapcost;
}
|
pmv-OpenMP-reduction.c | /*
Multiplica una matriz por un vector
*/
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // biblioteca donde se encuentra la función clock_gettime()
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
//#define PRINTF_ALL// comentar para quitar el printf ...
// que imprime todos los componentes
/*
 * Multiplica una matriz NxN por un vector usando una reducción OpenMP.
 * Uso: programa <N>
 * Devuelve 0 en caso de éxito; termina con -1 si faltan argumentos,
 * el tamaño no es válido o falla la reserva de memoria.
 *
 * NOTE(review): sin _OPENMP solo se define omp_get_thread_num(); la llamada
 * a omp_get_wtime() no compilaría — confirmar que siempre se compila con
 * soporte OpenMP o ampliar el bloque #else de cabecera.
 */
int main(int argc, char** argv){
    int i, j;
    double cgt1, cgt2;  /* marcas de tiempo */
    double ncgt;        /* tiempo de ejecución */
    /* Leer argumento de entrada (nº de componentes del vector) */
    if (argc < 2){
        printf("Falta tamaño\n");
        exit(-1);
    }
    /* Fix: atoi no informa de errores; un tamaño 0 o negativo se convertía
     * en un unsigned enorme y el printf final leía v2[0]/v2[N-1] fuera de
     * rango. Se valida que N >= 1 antes de reservar nada. */
    long n_arg = strtol(argv[1], NULL, 10);
    if (n_arg <= 0){
        printf("Tamaño no válido\n");
        exit(-1);
    }
    unsigned int N = (unsigned int) n_arg;
    /* Fix: comprobar todas las reservas; antes un fallo de malloc producía
     * una desreferencia de NULL. */
    double *v1 = malloc(N * sizeof(double));
    double *v2 = malloc(N * sizeof(double));
    double **M = malloc(N * sizeof(double*));
    if (v1 == NULL || v2 == NULL || M == NULL){
        printf("Error de reserva de memoria\n");
        exit(-1);
    }
    for(i = 0; i < N; i++){
        M[i] = malloc(N * sizeof(double));
        if (M[i] == NULL){
            printf("Error de reserva de memoria\n");
            exit(-1);
        }
    }
    /* Inicialización determinista de la matriz y el vector. */
    for(i = 0; i < N; i++){
        v1[i] = N*0.1 + i*0.1;
        for(j = 0; j < N; j++)
            M[i][j] = N*0.1 - j*0.1;
    }
    double suma;
    cgt1 = omp_get_wtime();
    for(i = 0; i < N; i++){
        suma = 0;
        /* Cada fila se reduce en paralelo sobre las columnas. */
        #pragma omp parallel for reduction(+:suma)
        for(j = 0; j < N; j++)
            suma += M[i][j]*v1[j];
        v2[i] = suma;
    }
    cgt2 = omp_get_wtime();
    ncgt = cgt2 - cgt1;
    /* Imprimir resultado y el tiempo de ejecución */
#ifdef PRINTF_ALL
    printf("Tiempo(seg.):%11.9f\t / Tamaño:%u\n",ncgt,N);
    printf("Resulado:\n");
    for(i=0; i<N; i++)
        printf("V[%d] = %8.6f /",i,v2[i]);
    printf("\n");
#else
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ V[0]=%8.6f / / V[%d]=%8.6f /\n",ncgt,N,v2[0],N-1,v2[N-1]);
#endif
    free(v1); /* libera el espacio reservado para v1 */
    free(v2); /* libera el espacio reservado para v2 */
    for(i = 0; i < N; i++)
        free(M[i]);
    free(M);
    return 0;
}
|
gemm.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "common/log.h"
#include "memory/t_malloc.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// 矩阵取值运算宏,假设矩阵按行存储
#define A(i, j) A[(i)*lda + (j)]
#define B(i, j) B[(i)*ldb + (j)]
#define C(i, j) C[(i)*ldc + (j)]
#if __aarch64__
#define MR_INT8 4
#define NR_INT8 2
#define MR 6
#define NR 16
#else
#define MR_INT8 4
#define NR_INT8 2
#define MR 6
#define NR 8
#endif
#define s_min(i, j) ((i) < (j) ? (i) : (j))
namespace paddle_mobile {
namespace operators {
namespace math {
class Gemm {
public:
/*
// 将 A 矩阵分块复制到连续内存(ColMajor)
void PackMatrixA(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
// 将 B 矩阵分块复制到连续内存(ColMajor)
void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
*/
typedef void (Gemm::*FnPack)(int, int, int, const float *, int, float *);
typedef void (Gemm::*FnAddDot)(int, const float *, const float *, float *,
int);
FnPack procPackA;
FnPack procPackB;
FnAddDot procAddDot;
// 将 A 矩阵分块复制到连续内存(RowMajor)
void PackMatrixA_4r(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
void PackMatrixA_6r(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
void PackMatrixA_8r(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
void PackMatrixA_omp_6r(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
void PackMatrixA_omp_8r(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
// 将 B 矩阵分块复制到连续内存(RowMajor)
void PackMatrixB_8c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
void PackMatrixB_12c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
void PackMatrixB_16c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
void PackMatrixB_omp_8c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
void PackMatrixB_omp_12c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
void PackMatrixB_omp_16c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
// 分块矩阵乘法
void InnerKernel(int mc, int nc, float alpha, const float *a, const float *b,
float beta, float *c, float *C, int ldc, bool relu);
void InnerKernelWithBias(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *bias);
void InnerKernelWithBn(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *new_scale, float *new_bias);
void InnerKernelWithBnAdd(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *new_scale,
float *new_bias, float *bias);
void InnerKernelWithPRelu(int mc, int nc, const float *a, const float *b,
float *c, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
/*
// 向量矩阵乘法 (M = 1)
void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu);
void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta, float
*C, int ldc, bool relu, float *new_scale, float *new_bias);
*/
// Compute a smaller sub-block of matrix C
void AddDot4x4(int k, const float *a, const float *b, float *c, int ldc);
void AddDot4x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot8x12(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x16(int k, const float *a, const float *b, float *c, int ldc);
// Write back the results of the blocked matrix multiplication
// C = A * B
void WriteBasic(int mc, int nc, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void WriteWithAlphaBeta(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C
void WriteWithAdd(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + bias
void WriteWithAddV1(int mc, int nc, float *c, float *C, int ldc, float *bias);
// C = A * B + C, relu(C)
void WriteWithAddRelu(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C, prelu(C)
void WriteWithAddPRelu(int mc, int nc, float *c, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// C = A * B + bias ,relu(C)
void WriteWithAddReluV1(int mc, int nc, float *c, float *C, int ldc,
float *bias);
// C = A * B, batchnorm(C)
void WriteWithBn(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void WriteWithBnRelu(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias);
void WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias, float *bias1);
/*
// Write back the results of the vector-matrix multiplication
// C = A * B
void VecWriteBasic(int n, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc);
// C = A * B + C
void VecWriteWithAdd(int n, float *c, float *C, int ldc);
// C = A * B + C, relu(C)
void VecWriteWithAddRelu(int n, float *c, float *C, int ldc);
// C = A * B, batchnorm(C)
void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale,
float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float *new_scale,
float *new_bias);
*/
// 32-bit float matrix multiplication
void Sgemm(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc, bool relu,
float *bias);
// 32-bit float matrix multiplication, with batchnorm applied to the result
void SgemmWithBn(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu, float *new_scale, float *new_bias, float *bias);
void SgemmWithPRelu(int m, int n, int k, const float *A, int lda,
const float *B, int ldb, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// 32-bit float matrix multiplication (OpenMP multi-threaded version)
void Sgemm_omp(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu, float *bias);
// 32-bit float matrix multiplication with batchnorm (OpenMP multi-threaded version)
void SgemmWithBn_omp(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta, float *C,
int ldc, bool relu, float *new_scale, float *new_bias,
float *bias);
void SgemmWithPRelu_omp(int m, int n, int k, const float *A, int lda,
const float *B, int ldb, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// 8 bits function cluster begins
// 8 bits int small block inner product
void AddDot4x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
void AddDot4x2(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
void AddDot6x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
// 8 bits int inner product
template <typename Otype>
void InnerKernel(int32_t mc, int32_t nc, float alpha, const int8_t *a,
const int8_t *b, float beta, int32_t *c, Otype *C,
int32_t ldc, bool relu);
template <typename Otype>
void InnerKernelWithBias(int32_t mc, int32_t nc, float alpha, const int8_t *a,
const int8_t *b, float beta, int32_t *c, Otype *C,
int32_t ldc, bool relu, int32_t *bias,
bool addOnRow = false);
// 8 bits int pack function
void PackMatrixA_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixA_4r_16(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixA_6r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixB_2c_16(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixB_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixB_omp_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r_16(int32_t m, int32_t k, int32_t m_tail,
const int8_t *A, int32_t lda, int8_t *buffer);
void PackMatrixB_omp_2c_16(int32_t k, int32_t n, int32_t n_tail,
const int8_t *B, int32_t ldb, int8_t *buffer);
// 8 bits int matrix product
template <typename Itype, typename Btype, typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta,
Otype *C, int32_t ldc, bool relu, int32_t *bias,
bool addOnRow = false);
template <typename Itype, typename Btype, typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, int32_t *bias, bool addOnRow = false);
// 8 bits int write back
// C = A * B
void WriteBasic(int32_t mc, int32_t nc, int32_t *c, int32_t *C, int32_t ldc);
// C = A * B + bias, scale * relu(C)
void WriteWithAddReluScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on column
void WriteWithAddScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on row
void WriteWithAddScaleT(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
private:
int MC = 0;
int KC = 0;
int NC = 0;
// 32-bit float scratch buffers
float *packedA;
float *packedB;
float *packedC;
float *zero;
// 8 bits int
int8_t *packedA_int8;
int8_t *packedB_int8;
int32_t *packedC_int32;
int8_t *zero_int8;
};
// 8-bit int matrix product (m*k x k*n): blocked int8 GEMM with an int32
// accumulator. A and B are packed into cache-sized panels, the inner
// kernels produce the final output of type Otype, optionally adding a
// bias (per row or per column, selected by addOnRow) and applying relu.
template <typename Otype>
void Gemm::Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
                 int32_t lda, const int8_t *B, int32_t ldb, float beta,
                 Otype *C, int32_t ldc, bool relu, int32_t *bias,
                 bool addOnRow) {
  // L1 data cache is 32 KiB (per core on Cortex-A57, Cortex-A72, Cortex-A73)
  // L2 cache is 0.5~4 MiB (Cortex-A72 cluster)
  int32_t L1 = 32 * 1024;
  int32_t L2 = 512 * 1024;
  // Round k up to the next multiple of 16:
  // (k+15) - ((k+15) & 15) == (k+15) & ~15.
  const int32_t k_complete = (k + 15) - ((k + 15) & 15);
  KC = k_complete;
  // Size the A panel to fit in L1 and the B panel to fit in L2.
  MC = L1 / (KC * sizeof(int8_t));
  NC = L2 / (KC * sizeof(int8_t));
  // make sure MC is a multiple of MR_INT8, and NC is a multiple of NR_INT8
  if (MC == 0) {
    MC = MR_INT8;
  } else {
    int32_t mblock_num = (m + MC - 1) / MC;
    MC = (m + mblock_num - 1) / mblock_num;
    MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
  }
  // DLOG << "mblock_num = " << mblock_num << ", MC = " << MC << "\n";
  if (NC == 0) {
    NC = NR_INT8;
  } else {
    int32_t nblock_num = (n + NC - 1) / NC;
    NC = (n + nblock_num - 1) / nblock_num;
    NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
  }
  // DLOG << "nblock_num = " << nblock_num << ", NC = " << NC << "\n";
  // Scratch buffers: packed panels of A and B plus the int32 accumulator.
  packedA_int8 = static_cast<int8_t *>(
      paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
  packedB_int8 = static_cast<int8_t *>(
      paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
  packedC_int32 = static_cast<int32_t *>(
      paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC));
  zero_int8 =
      static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
  memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
  int32_t mc, nc;
  // Outer loop over column panels of B, inner loop over row panels of A.
  for (int32_t j = 0; j < n; j += NC) {
    nc = s_min(n - j, NC);
    PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, packedB_int8);
    for (int32_t i = 0; i < m; i += MC) {
      mc = s_min(m - i, MC);
      PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, packedA_int8);
      if (bias == nullptr) {
        InnerKernel(mc, nc, alpha, packedA_int8, packedB_int8, beta,
                    packedC_int32, &C(i, j), ldc, relu);
      } else {
        // addOnRow selects whether the bias is indexed by column (j)
        // or by row (i).
        if (addOnRow) {
          InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
                              packedC_int32, &C(i, j), ldc, relu, bias + j,
                              addOnRow);
        } else {
          InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
                              packedC_int32, &C(i, j), ldc, relu, bias + i,
                              addOnRow);
        }
      }
    }
  }
  paddle_mobile::memory::Free(packedA_int8);
  paddle_mobile::memory::Free(packedB_int8);
  paddle_mobile::memory::Free(packedC_int32);
  paddle_mobile::memory::Free(zero_int8);
}
// 8-bit int matrix product (m*k x k*n), OpenMP version. Depending on the
// matrix shapes, either A (when m > n) or B (when m <= n) is processed in
// per-thread blocks; the other operand is packed once up front and shared
// read-only by all threads.
template <typename Otype>
void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
                     const int8_t *A, int32_t lda, const int8_t *B, int32_t ldb,
                     float beta, Otype *C, int32_t ldc, bool relu,
                     int32_t *bias, bool addOnRow) {
#ifdef _OPENMP
  int32_t max_threads = omp_get_max_threads();
#else
  int32_t max_threads = 1;
#endif
  // Per-thread share of a 64 KiB L1 budget.
  int32_t L1 = 64 / max_threads * 1024;
  // Round k up to the next multiple of 16.
  const int32_t k_complete = (k + 15) - ((k + 15) & 15);
  KC = k_complete;
  zero_int8 =
      static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
  memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
  if (m > n) {
    // Partition A into row blocks, one block per loop iteration.
    MC = L1 / (KC * sizeof(int8_t));
    if (MC == 0) {
      MC = MR_INT8;
    } else {
      int32_t mblock_num = (m + MC - 1) / MC;
      MC = (m + mblock_num - 1) / mblock_num;
      MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
    }
    // Pad B up to a multiple of NR_INT8 columns and pack it once (shared).
    NC = (n + NR_INT8 - 1) / NR_INT8 * NR_INT8;
    packedB_int8 = static_cast<int8_t *>(
        paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
#if __aarch64__
    // TODO()
#else
    PackMatrixB_omp_2c_16(k, n, n % NR_INT8, B, ldb, packedB_int8);
#endif
    // One private A scratch area per thread.
    packedA_int8 = static_cast<int8_t *>(
        paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC * max_threads));
  } else {
    // Partition B into column blocks, one block per loop iteration.
    NC = L1 / (KC * sizeof(int8_t));
    if (NC == 0) {
      NC = NR_INT8;
    } else {
      int32_t nblock_num = (n + NC - 1) / NC;
      NC = (n + nblock_num - 1) / nblock_num;
      NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
    }
    // Pad A up to a multiple of MR_INT8 rows and pack it once (shared).
    MC = (m + MR_INT8 - 1) / MR_INT8 * MR_INT8;
    packedA_int8 = static_cast<int8_t *>(
        paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
#if __aarch64__
    // TODO()
#else
    PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8);
#endif
    // One private B scratch area per thread.
    packedB_int8 = static_cast<int8_t *>(
        paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC * max_threads));
  }
  // One private accumulator per thread.
  packedC_int32 = static_cast<int32_t *>(
      paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC * max_threads));
  if (m > n) {
#pragma omp parallel for
    for (int32_t i = 0; i < m; i += MC) {
#ifdef _OPENMP
      int32_t local_threads = omp_get_thread_num();
#else
      int32_t local_threads = 0;
#endif
      int32_t mc;
      mc = s_min(m - i, MC);
      // Each thread packs its own block of A into its private scratch area.
      int8_t *local_A = packedA_int8 + MC * KC * local_threads;
      int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__
      // TODO()
#else
      PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A);
#endif
      if (bias == nullptr) {
        InnerKernel(mc, n, alpha, local_A, packedB_int8, beta, local_C,
                    &C(i, 0), ldc, relu);
      } else {
        if (addOnRow) {
          InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
                              local_C, &C(i, 0), ldc, relu, bias, addOnRow);
        } else {
          InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
                              local_C, &C(i, 0), ldc, relu, bias + i, addOnRow);
        }
      }
    }
  } else {
#pragma omp parallel for
    for (int32_t j = 0; j < n; j += NC) {
#ifdef _OPENMP
      int32_t local_threads = omp_get_thread_num();
#else
      int32_t local_threads = 0;
#endif
      int32_t nc;
      nc = s_min(n - j, NC);
      // Each thread packs its own block of B into its private scratch area.
      int8_t *local_B = packedB_int8 + KC * NC * local_threads;
      int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__
      // TODO()
#else
      PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B);
#endif
      if (bias == nullptr) {
        InnerKernel(m, nc, alpha, packedA_int8, local_B, beta, local_C,
                    &C(0, j), ldc, relu);
      } else {
        if (addOnRow) {
          InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
                              local_C, &C(0, j), ldc, relu, bias + j, addOnRow);
        } else {
          InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
                              local_C, &C(0, j), ldc, relu, bias, addOnRow);
        }
      }
    }
  }
  paddle_mobile::memory::Free(packedA_int8);
  paddle_mobile::memory::Free(packedB_int8);
  paddle_mobile::memory::Free(packedC_int32);
  paddle_mobile::memory::Free(zero_int8);
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
|
absgradMEX_test.c | /**************************************************************************
MEX function to compute the approximate gradient of the absolute value
Author: R. Marc Lebel
Contact: mlebel@gmail.com
Date: 11/2010
Useage: wc2 = absgradMEX(wc,smooth)
Input:
wc: numeric array (single/double; real/complex)
smooth: small smoothing factor to prevent Inf
Output:
wc2: numeric array
**************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "mex.h"
#include "fast_mxArray_setup.c"
/* Fast approximate reciprocal square root (the classic "Quake" trick)
 * refined with two Newton-Raphson iterations.
 *
 * FIX: the original punned the float through `long *`, which violates
 * strict aliasing (UB) and, on LP64 platforms where long is 8 bytes,
 * reads 4 bytes past the 4-byte float. Use a uint32_t and memcpy, which
 * compilers lower to a plain register move. */
float Q_rsqrt( float number )
{
    uint32_t i;
    float x2, y;
    const float threehalfs = 1.5F;

    x2 = number * 0.5F;
    y  = number;
    memcpy(&i, &y, sizeof i);          /* safe type pun: float -> bits */
    i  = 0x5f375a86 - ( i >> 1 );      /* magic initial guess */
    memcpy(&y, &i, sizeof y);          /* bits -> float */
    y  = y * ( threehalfs - ( x2 * y * y ) );  /* 1st Newton iteration */
    y  = y * ( threehalfs - ( x2 * y * y ) );  /* 2nd Newton iteration */
    return y;
}
/* Double-precision variant of the fast reciprocal square root, refined
 * with three Newton-Raphson iterations. Interface unchanged: takes and
 * returns float, computes internally in double.
 *
 * FIXES vs. the original:
 *  - `1.5D` / `0.5D` are not valid C floating constants (no D suffix);
 *  - the bit pattern was punned through `long`, which is 4 bytes on
 *    LLP64 (Windows) and violates strict aliasing everywhere. Use
 *    uint64_t + memcpy instead. */
float Q_dsqrt( float number )
{
    uint64_t i;
    double x2, y;
    const double threehalfs = 1.5;

    x2 = number * 0.5;
    y  = number;
    memcpy(&i, &y, sizeof i);                   /* safe pun: double -> bits */
    i  = 0x5fe6eb50c7b537a9ULL - ( i >> 1 );    /* magic initial guess */
    memcpy(&y, &i, sizeof y);                   /* bits -> double */
    y  = y * ( threehalfs - ( x2 * y * y ) );   /* Newton iterations */
    y  = y * ( threehalfs - ( x2 * y * y ) );
    y  = y * ( threehalfs - ( x2 * y * y ) );
    return (float)y;
}
/* MEX entry point: wc2 = absgradMEX(wc, smooth).
 * Computes, elementwise, the smoothed gradient of the absolute value:
 *   d|x|/dx ~= x / sqrt(|x|^2 + smooth)
 * for real or complex input, in single or double precision, with the
 * smoothing term preventing division by zero at x == 0. */
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
    /* Declare variables */
    mwSize nD, elem, cmplx, *size2;
    long long i;
    mxClassID precision;
    const mwSize *size;
    mxComplexity cmplx2;
    mxArray *X, *Y;   /* NOTE(review): X and Y are never used */
    double *pXr, *pXi, *pYi, *pYr, *pS, Sd, denom;
    float *pXrf, *pXif, *pYif, *pYrf, *pSf, Sf, denomf;
    /* Get the dimensions of the input array */
    nD = mxGetNumberOfDimensions(right[0]);
    size = mxGetDimensions(right[0]);
    elem = mxGetNumberOfElements(right[0]);
    /*mexPrintf("nD: %i\n",nD);
    mexPrintf("size: %i\n",size[0]);
    mexPrintf("elem: %i\n",elem);*/
    /* Copy the (const) size vector into writable memory, as required by
     * create_array_d/f below. */
    size2 = (mwSize *)mxMalloc(nD*sizeof(mwSize));
    memcpy(size2,size,nD*sizeof(mwSize));
    /* Test for complex input and obtain its data class */
    cmplx = mxIsComplex(right[0]);
    precision = mxGetClassID(right[0]);
    cmplx2 = cmplx ? mxCOMPLEX:mxREAL;
    /* Ensure the smoothing factor is real */
    if (mxIsComplex(right[1]))
        mexErrMsgTxt("Inputs 1 is complex");
    /* Get pointers to the input array and create the output array of the
     * same class/complexity. Only one of the two pointer sets (double or
     * float) is valid, depending on `precision`. */
    if (precision == mxDOUBLE_CLASS) {
        pXr = mxGetPr(right[0]);
        if (cmplx)
            pXi = mxGetPi(right[0]);
        /* Create output and assign pointers */
        create_array_d(&(left[0]), &pYr, &pYi, nD, size2, cmplx2, 0);
    }
    else {
        pXrf = mxGetData(right[0]);
        if (cmplx)
            pXif = mxGetImagData(right[0]);
        /* Create output and assign pointers */
        create_array_f(&(left[0]), &pYrf, &pYif, nD, size2, cmplx2, 0);
    }
    /* Get a pointer to the smoothing scalar in its native class */
    if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
        pS = mxGetData(right[1]);
    else
        pSf = mxGetData(right[1]);
    /* Convert the smoothing factor to the class of the data array */
    if (precision == mxDOUBLE_CLASS) {
        if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
            Sd = pS[0];
        else
            Sd = (double) pSf[0];
    }
    else {
        if (mxGetClassID(right[1]) == mxDOUBLE_CLASS)
            Sf = (float) pS[0];
        else
            Sf = pSf[0];
    }
    /* Elementwise gradient of the absolute value. Four OpenMP-parallel
     * branches: {double, float} x {complex, real}. The float branches use
     * the fast approximate reciprocal square root Q_rsqrt. */
    if (precision == mxDOUBLE_CLASS) {
        if (cmplx) {
#pragma omp parallel for private(i,denom)
            for (i=0; i<elem; i++) {
                denom = 1.0/sqrt(pXr[i]*pXr[i] + pXi[i]*pXi[i] + Sd);
                pYr[i] = pXr[i] * denom;
                pYi[i] = pXi[i] * denom;
            }
        }
        else {
#pragma omp parallel for private(i)
            for (i=0; i<elem; i++) {
                pYr[i] = pXr[i]/sqrt(pXr[i]*pXr[i] + Sd);
            }
        }
    }
    else {
        if (cmplx) {
#pragma omp parallel for private(i,denomf)
            for (i=0; i<elem; i++) {
                denomf = Q_rsqrt(pXrf[i]*pXrf[i] + pXif[i]*pXif[i] + Sf); /* Not working on ubuntu?! */
                /*denomf = 1.0/sqrt(pXrf[i]*pXrf[i] + pXif[i]*pXif[i] + Sf);*/
                pYrf[i] = pXrf[i] * denomf;
                pYif[i] = pXif[i] * denomf;
            }
        }
        else {
#pragma omp parallel for private(i,denomf)
            for (i=0; i<elem; i++) {
                /*pYrf[i] = pXrf[i]/sqrt(pXrf[i]*pXrf[i] + Sf);*/
                denomf = Q_rsqrt(pXrf[i]*pXrf[i] + Sf);
                pYrf[i] = pXrf[i] * denomf; /* Not working on ubuntu?! */
            }
        }
    }
    /* Free the temporary size vector */
    mxFree(size2);
}
|
collective_reduction.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Implements the collective reduce and allreduce mixed */
/* mode OpenMP/MPI benchmarks. */
/*-----------------------------------------------------------*/
#include "collective_reduction.h"
/*-----------------------------------------------------------*/
/* reduction */
/* */
/* Driver subroutine for the reduce and allReduce */
/* benchmarks. */
/*-----------------------------------------------------------*/
/* Driver for the reduce / allreduce benchmarks. For each data size from
 * minDataSize to maxDataSize (doubling each round): allocate buffers,
 * warm up and verify the result once, then repeat the kernel until the
 * target run time is reached, and report timings from rank 0.
 * benchmarkType is REDUCE or ALLREDUCE. Always returns 0. */
int reduction(int benchmarkType) {
  int dataSizeIter, sizeofBuf;
  /* Initialise repsToDo to defaultReps */
  repsToDo = defaultReps;
  /* Start loop over data sizes */
  dataSizeIter = minDataSize; /* initialise dataSizeIter */
  while (dataSizeIter <= maxDataSize) {
    /* allocate space for the main data arrays.. */
    allocateReduceData(dataSizeIter);
    /* Perform benchmark warm-up */
    if (benchmarkType == REDUCE) {
      reduceKernel(warmUpIters, dataSizeIter);
      /* Only the master process holds the result of MPI_Reduce, so only
       * it can verify the warm-up run. */
      if (myMPIRank == 0) { testReduce(dataSizeIter, benchmarkType); }
    } else if (benchmarkType == ALLREDUCE) {
      /* For allreduce every thread's portion is filled, so the verified
       * buffer is dataSize * numThreads elements. */
      sizeofBuf = dataSizeIter * numThreads;
      allReduceKernel(warmUpIters, dataSizeIter);
      /* all processes need to perform the unit test */
      testReduce(sizeofBuf, benchmarkType);
    }
    /* Initialise the benchmark */
    benchComplete = FALSE;
    /* Execute benchmark until target time is reached */
    while (benchComplete != TRUE) {
      /* Start timer */
      MPI_Barrier(comm);
      startTime = MPI_Wtime();
      /* Execute reduce for repsToDo repetitions */
      if (benchmarkType == REDUCE) {
        reduceKernel(repsToDo, dataSizeIter);
      } else if (benchmarkType == ALLREDUCE) {
        allReduceKernel(repsToDo, dataSizeIter);
      }
      /* Stop timer */
      MPI_Barrier(comm);
      finishTime = MPI_Wtime();
      totalTime = finishTime - startTime;
      /* Rank 0 decides whether the target time was reached and possibly
       * adjusts repsToDo for the next round. */
      if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); }
      /* Ensure all procs have the same value of benchComplete */
      /* and repsToDo */
      MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
      MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
    }
    /* Master process sets benchmark result for reporting */
    if (myMPIRank == 0) {
      setReportParams(dataSizeIter, repsToDo, totalTime);
      printReport();
    }
    /* Free allocated data */
    freeReduceData();
    /* Double dataSize and loop again */
    dataSizeIter = dataSizeIter * 2;
  }
  return 0;
}
/*-----------------------------------------------------------*/
/* reduceKernel */
/* */
/* Implements the reduce mixed mode benchmark. */
/* Each thread under every MPI process combines its local */
/* buffer. This is then sent to the master MPI process to */
/* get the overall reduce value. */
/*-----------------------------------------------------------*/
/* Reduce benchmark kernel: each OpenMP thread fills its slice of tempBuf
 * with its global thread ID, the threads combine tempBuf into
 * localReduceBuf, and MPI_Reduce sums localReduceBuf across processes
 * into globalReduceBuf on rank 0, which copies it into finalReduceBuf.
 * Repeated totalReps times. Always returns 0. */
int reduceKernel(int totalReps, int dataSize) {
  int repIter, i, j;

  /* FIX: start at 0 so exactly totalReps repetitions execute. The loop
   * previously ran from 1 to totalReps-1 (totalReps-1 iterations), which
   * is inconsistent with allReduceKernel and skews the reported timing. */
  for (repIter = 0; repIter < totalReps; repIter++) {
    /* Manually perform the reduction between OpenMP threads */
#pragma omp parallel default(none) private(i, j) \
    shared(tempBuf, globalIDarray, dataSize, numThreads) shared(localReduceBuf)
    {
      /* 1) Initialise the tempBuf array: each thread writes its own
       *    global ID into the elements it owns. */
#pragma omp for schedule(static, dataSize)
      for (i = 0; i < (numThreads * dataSize); i++) {
        tempBuf[i] = globalIDarray[myThreadID];
      }
      /* 2) Reduce tempBuf into localReduceBuf */
#pragma omp for
      for (i = 0; i < dataSize; i++) {
        localReduceBuf[i] = 0;
        for (j = 0; j < numThreads; j++) {
          localReduceBuf[i] += tempBuf[(j * dataSize) + i];
        }
      }
    }
    /* Perform a reduce of localReduceBuf across the
     * MPI processes.
     */
    MPI_Reduce(localReduceBuf, globalReduceBuf, dataSize, MPI_INT, MPI_SUM, 0,
               comm);
    /* Copy globalReduceBuf into the master thread's portion of
     * finalReduceBuf. Only rank 0 owns a valid MPI_Reduce result.
     */
    if (myMPIRank == 0) {
      for (i = 0; i < dataSize; i++) { finalReduceBuf[i] = globalReduceBuf[i]; }
    }
  }
  return 0;
}
/*-----------------------------------------------------------*/
/* allReduce */
/* */
/* Implements the allreduce mixed mode benchmark. */
/* Each thread under every MPI process combines its local */
/* buffer. All MPI processes then combine this value to */
/* the overall reduction value at each process. */
/*-----------------------------------------------------------*/
int allReduceKernel(int totalReps, int dataSize) {
int repIter, i, j;
int startPos;
for (repIter = 0; repIter < totalReps; repIter++) {
/* Manually perform the reduction between OpenMP threads */
#pragma omp parallel default(none) private(i, j) \
shared(tempBuf, globalIDarray, dataSize, numThreads) shared(localReduceBuf)
{
/* 1) Intialise the tempBuf array */
#pragma omp for schedule(static, dataSize)
for (i = 0; i < (numThreads * dataSize); i++) {
tempBuf[i] = globalIDarray[myThreadID];
}
/* 2) Reduce tempBuf into localReduceBuf */
#pragma omp for
for (i = 0; i < dataSize; i++) {
localReduceBuf[i] = 0;
for (j = 0; j < numThreads; j++) {
localReduceBuf[i] += tempBuf[(j * dataSize) + i];
}
}
}
/* Perform an all reduce of localReduceBuf across
* the MPI processes.
*/
MPI_Allreduce(localReduceBuf, globalReduceBuf, dataSize, MPI_INTEGER,
MPI_SUM, comm);
/* Each thread copies globalReduceBuf into its portion
* of finalReduceBuf.
*/
#pragma omp parallel default(none) private(i, startPos) \
shared(dataSize, finalReduceBuf, globalReduceBuf)
{
/* Calculate the start of each threads portion
* of finalReduceBuf.
*/
startPos = (myThreadID * dataSize);
for (i = 0; i < dataSize; i++) {
finalReduceBuf[startPos + i] = globalReduceBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* allocateReduceData */
/* */
/* Allocate memory for the main data arrays in the */
/* reduction operation. */
/*-----------------------------------------------------------*/
/* Allocate the four shared buffers used by the reduction benchmarks.
 * localReduceBuf/globalReduceBuf hold one element per data item;
 * tempBuf/finalReduceBuf hold one slice per OpenMP thread.
 * NOTE(review): malloc results are not checked; an allocation failure
 * will surface as a crash inside the kernels. Always returns 0. */
int allocateReduceData(int bufferSize) {
  localReduceBuf = (int *)malloc(bufferSize * sizeof(int));
  globalReduceBuf = (int *)malloc(bufferSize * sizeof(int));
  /* tempBuf and finalReduceBuf are of size dataSize * numThreads */
  tempBuf = (int *)malloc((bufferSize * numThreads) * sizeof(int));
  finalReduceBuf = (int *)malloc((bufferSize * numThreads) * sizeof(int));
  return 0;
}
/*-----------------------------------------------------------*/
/* freeReduceData */
/* */
/* Free allocated memory for main data arrays. */
/*-----------------------------------------------------------*/
/* Release the buffers allocated by allocateReduceData. The global
 * pointers are left dangling until the next allocateReduceData call.
 * Always returns 0. */
int freeReduceData() {
  free(localReduceBuf);
  free(globalReduceBuf);
  free(tempBuf);
  free(finalReduceBuf);
  return 0;
}
/*-----------------------------------------------------------*/
/* testReduce */
/* */
/* Verifies that the reduction benchmarks worked correctly. */
/*-----------------------------------------------------------*/
/* Verify the reduction result: every element of finalReduceBuf must equal
 * the sum of all global thread IDs, i.e. 0 + 1 + ... + (numMPIprocs *
 * numThreads - 1). For ALLREDUCE all ranks check and the flags are ANDed
 * onto rank 0; for REDUCE the caller invokes this on rank 0 only.
 * Always returns 0. */
int testReduce(int bufferSize, int benchmarkType) {
  int i, testFlag, reduceFlag;
  int correctReduce, lastGlobalID;
  /* Initialise correctReduce to 0.. */
  correctReduce = 0;
  /* ..and testFlag to true */
  testFlag = TRUE;
  /* set lastGlobalID */
  lastGlobalID = (numMPIprocs * numThreads);
  /* Now find the correct reduce value by summing IDs 0..lastGlobalID-1 */
  for (i = 0; i < lastGlobalID; i++) { correctReduce = correctReduce + i; }
  /* Compare each element of finalReduceBuf to correctReduce */
  for (i = 0; i < bufferSize; i++) {
    if (finalReduceBuf[i] != correctReduce) { testFlag = FALSE; }
  }
  /* For allReduce, combine testFlag into master
   * with logical AND.
   */
  if (benchmarkType == ALLREDUCE) {
    MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
    /* then master sets testOutcome using reduceFlag */
    if (myMPIRank == 0) { setTestOutcome(reduceFlag); }
  } else {
    /* for reduce, the master process just sets testOutcome using testFlag */
    setTestOutcome(testFlag);
  }
  return 0;
}
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2020, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#include "facedetection_export.h"
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image!
/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product
//to conver the input data to range [0, 127],
//and then use INT8*INT8 dot product
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif
#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
#elif defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2)
#error Cannot enable the two of AVX512 and AVX2 at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
// One face detection result: confidence score and bounding box.
// NOTE(review): lm[10] presumably stores 5 facial landmark points as
// interleaved (x, y) pairs — confirm against the detection code.
typedef struct FaceRect_
{
    float score;    // detection confidence
    int x;          // bounding box top-left x
    int y;          // bounding box top-left y
    int w;          // bounding box width
    int h;          // bounding box height
    int lm[10];     // landmark coordinates (see note above)
}FaceRect;
// Static description of one convolution layer: geometry plus pointers to
// quantized weights and biases.
typedef struct ConvInfoStruct_ {
    int pad;                // padding
    int stride;             // stride
    int kernel_size;        // square kernel side length
    int channels;           // input channels
    int num;                // NOTE(review): presumably output channels — confirm
    float scale;            // scale factor paired with the int8 weights
    signed char* pWeights;  // quantized (int8) weights
    signed int* pBias;      // int32 biases
}ConvInfoStruct;
template <class T>
class CDataBlob
{
public:
T * data;
int width;
int height;
int channels;
int channelStep;
float scale;
//when the datablob is a filter, the bias is 0 by default
//if it is the filted data, the bias is 1 by default
int bias;
public:
CDataBlob() {
    // Construct an empty, unallocated blob: no storage, zero
    // dimensions, unit scale and zero bias.
    data = 0;
    width = height = channels = channelStep = 0;
    scale = 1.0f;
    bias = 0;
}
// Construct and immediately allocate a w x h x c blob.
// NOTE(review): the bool result of create() is ignored here, so an
// allocation failure is only observable through data == NULL.
CDataBlob(int w, int h, int c)
{
    data = 0;
    create(w, h, c);
}
// Destructor: releases the data buffer and resets all fields via setNULL().
~CDataBlob()
{
    setNULL();
}
void setNULL()
{
    // Release any owned buffer (the myFree macro also nulls the
    // pointer), then return the blob to its empty state. Note: bias is
    // intentionally left untouched, matching the original behaviour.
    if (data != 0)
        myFree(&data);
    width = 0;
    height = 0;
    channels = 0;
    channelStep = 0;
    scale = 1.0f;
}
// Allocate storage for a w x h x c blob. The per-pixel channel vector is
// padded so that it occupies a multiple of _MALLOC_ALIGN/8 bytes
// (channelStep); only those padding elements are zeroed here, not the
// payload. Returns false if allocation fails (fields already set).
bool create(int w, int h, int c)
{
    setNULL();
    width = w;
    height = h;
    channels = c;
    bias = 0;
    //alloc space for int8 array
    // channelStep = channels*sizeof(T) rounded up to the alignment unit
    int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
    if (remBytes == 0)
        this->channelStep = channels * sizeof(T);
    else
        this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
    data = (T*)myAlloc(size_t(width) * height * this->channelStep);
    if (data == NULL)
    {
        cerr << "Failed to alloc memeory for uint8 data blob: "
            << width << "*"
            << height << "*"
            << channels << endl;
        return false;
    }
    //memset(data, 0, width * height * channelStep);
    //the following code is faster than memset,
    //but note: ONLY the alignment-padding elements are set to zero;
    //the payload channels are left uninitialized.
    //BE CAREFUL!!!
    //#if defined(_OPENMP)
    //#pragma omp parallel for
    //#endif
    for (int r = 0; r < this->height; r++)
    {
        for (int c = 0; c < this->width; c++)
        {
            // pixel_end = number of T elements per pixel including padding
            int pixel_end = this->channelStep / sizeof(T);
            T * pI = (this->data + (size_t(r) * this->width + c) * this->channelStep /sizeof(T));
            // zero only the padding elements after the real channels
            for (int ch = this->channels; ch < pixel_end; ch++)
                pI[ch] = 0;
        }
    }
    return true;
}
// Copy int8 filter weights from pData (channel-major: [ch][row][col])
// into this blob's pixel-major padded layout, and record the filter bias.
// Dimensions must already match this blob; T must be signed char.
// Returns false on any validation failure.
bool setInt8FilterData(signed char * pData, int bias, int dataWidth, int dataHeight, int dataChannels)
{
    if (pData == NULL)
    {
        cerr << "The input image data is null." << endl;
        return false;
    }
    if (typeid(signed char) != typeid(T))
    {
        cerr << "Data must be signed char, the same with the source data." << endl;
        return false;
    }
    if (dataWidth != this->width ||
        dataHeight != this->height ||
        dataChannels != this->channels)
    {
        cerr << "The dimension of the data can not match that of the Blob." << endl;
        return false;
    }
    // Transpose CHW source into the blob's HWC (padded) layout.
    for(int row = 0; row < height; row++)
        for (int col = 0; col < width; col++)
        {
            T * p = (this->data + (size_t(width) * row + col) * channelStep /sizeof(T));
            for (int ch = 0; ch < channels; ch++)
            {
                p[ch] = pData[ch * height * width + row * width + col];
            }
        }
    // The parameter shadows the member of the same name; store explicitly.
    this->bias = bias;
    return true;
}
// Repackage a 3-channel image so that a 3x3/stride-2/pad-1 convolution
// can run as a 1x1/stride-1 convolution: each output pixel gathers its
// 3x3 neighbourhood of RGB triples into 27 channels. Taps falling outside
// the image stay zero (initialized by the memset below).
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
{
    if (imgData == NULL)
    {
        cerr << "The input image data is null." << endl;
        return false;
    }
    if (typeid(unsigned char) != typeid(T))
    {
        cerr << "Data must be unsigned char, the same with the source data." << endl;
        return false;
    }
    if (imgChannels != 3)
    {
        cerr << "The input image must be a 3-channel RGB image." << endl;
        return false;
    }
    // BUG FIX: create() can fail on allocation; previously its result was
    // ignored and the memset below would write through a NULL pointer.
    if (!create((imgWidth+1)/2, (imgHeight+1)/2, 27))
        return false;
    //since the pixel assignment cannot fill all the elements in the blob.
    //some elements in the blob should be initialized to 0
    memset(data, 0, size_t(width) * height * channelStep);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
    for (int r = 0; r < this->height; r++)
    {
        for (int c = 0; c < this->width; c++)
        {
            // channelStep is a byte count; T is unsigned char here (checked
            // above), so plain byte arithmetic addresses the pixel block.
            T * pData = (unsigned char*)this->data + (size_t(r) * this->width + c) * this->channelStep;
            for (int fy = -1; fy <= 1; fy++)
            {
                int srcy = r * 2 + fy;
                if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
                    continue;
                for (int fx = -1; fx <= 1; fx++)
                {
                    int srcx = c * 2 + fx;
                    if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
                        continue;
                    const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
                    int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
#if defined(_ENABLE_NEON)
                    // NEON path halves the values (compensated by scale=0.5f below).
                    pData[output_channel_offset] = (pImgData[0] / 2);
                    pData[output_channel_offset + 1] = (pImgData[1] / 2);
                    pData[output_channel_offset + 2] = (pImgData[2] / 2);
#else
                    pData[output_channel_offset] = (pImgData[0]);
                    pData[output_channel_offset+1] = (pImgData[1]);
                    pData[output_channel_offset+2] = (pImgData[2]);
#endif
                }
            }
        }
    }
#if defined(_ENABLE_NEON)
    this->bias = 1; // 1/2 = 0
    this->scale = 0.5f;
#else
    this->bias = 1;
    this->scale = 1.0f;
#endif
    return true;
}
// Bounds-checked read of one element; returns (T)0 when no buffer is
// allocated or any coordinate is out of range.
T getElement(int x, int y, int channel)
{
    if (this->data == NULL)
        return (T)(0);
    bool inRange = (x >= 0 && x < this->width) &&
                   (y >= 0 && y < this->height) &&
                   (channel >= 0 && channel < this->channels);
    if (!inRange)
        return (T)(0);
    const T * pPixel = this->data + (size_t(y) * this->width + x) * this->channelStep / sizeof(T);
    return pPixel[channel];
}
// Stream a human-readable dump of the blob: a size header followed by
// every channel plane, one row per line.
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
    // FIX: header previously named 4 fields but printed 5 (bias was unlabeled).
    output << "DataBlob Size (Width, Height, Channel, scale, bias) = ("
        << dataBlob.width
        << ", " << dataBlob.height
        << ", " << dataBlob.channels
        << ", " << dataBlob.scale
        << ", " << dataBlob.bias
        << ")" << endl;
    for (int ch = 0; ch < dataBlob.channels; ch++)
    {
        output << "Channel " << ch << ": " << endl;
        for (int row = 0; row < dataBlob.height; row++)
        {
            output << "(";
            for (int col = 0; col < dataBlob.width; col++)
            {
                // FIX: size_t cast matches the other accessors and avoids
                // int overflow in the index arithmetic on large blobs.
                T * p = (dataBlob.data + (size_t(dataBlob.width) * row + col) * dataBlob.channelStep / sizeof(T));
                if (sizeof(T) < 4)
                    output << (int)(p[ch]); // print byte types as numbers, not characters
                else
                    output << p[ch];
                if (col != dataBlob.width - 1)
                    output << ", ";
            }
            output << ")" << endl;
        }
    }
    return output;
}
};
class Filters {
public:
vector<CDataBlob<signed char> *> filters;
int pad;
int stride;
float scale; //element * scale = original value
Filters()
{
pad = 0;
stride = 0;
scale = 0;
}
~Filters()
{
for (int i = 0; i < filters.size(); i++)
{
delete filters[i];
filters[i] = 0;
}
}
};
// ---- CNN primitive operations (definitions live elsewhere) ----
// All return false on invalid input, matching the CDataBlob methods above.
bool convertInt2Float(CDataBlob<int> * inputData, CDataBlob<float> * outputData);
bool convolution(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<int> *outputData);
// Convolution fused with a ReLU activation.
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<unsigned char> *outputData);
// 2x2 max pooling with stride 2.
bool maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);
bool priorbox(const CDataBlob<unsigned char> * featureData, int img_width, int img_height, int step, int num_sizes, float * pWinSizes, CDataBlob<float> * outputData);
// Channel-wise concatenation of four blobs.
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2, const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4, CDataBlob<T> *outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> * inputData, CDataBlob<T> * outputData);
bool softmax1vector2class(CDataBlob<float> *inputOutputData);
bool clamp1vector(CDataBlob<float> *inputOutputData);
// Non-maximum suppression / final detection assembly from prior boxes,
// location, confidence and IoU blobs.
bool detection_output(const CDataBlob<float> * priorbox,
const CDataBlob<float> * loc,
const CDataBlob<float> * conf,
const CDataBlob<float> * iou,
float overlap_threshold,
float confidence_threshold,
int top_k,
int keep_top_k,
CDataBlob<float> * outputData);
// NOTE(review): parameter 'with' is presumably the image width (typo) -- confirm at the definition.
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
|
lbfgsbsolver.h | // CppNumericalSolver
// based on:
// L-BFGS-B: A LIMITED MEMORY ALGORITHM FOR BOUND CONSTRAINED OPTIMIZATION
// Richard H. Byrd, Peihuang Lu, Jorge Nocedal and Ciyou Zhu
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../boundedproblem.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H
#define LBFGSBSOLVER_H
namespace cppoptlib {
// L-BFGS-B solver: limited-memory BFGS for bound-constrained minimization
// (Byrd, Lu, Nocedal & Zhu). The implicit Hessian approximation is
// B = theta*I - W*M*W^T, built from the (s, y) correction history.
template<typename TProblem>
class LbfgsbSolver : public ISolver<TProblem, 1> {
public:
using Superclass = ISolver<TProblem, 1>;
using typename Superclass::Scalar;
using typename Superclass::TVector;
using MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using VariableTVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
protected:
// workspace matrices
MatrixType W, M; // W = [Y, theta*S], M = middle matrix of the compact representation
Scalar theta; // scaling of the identity part of the Hessian approximation
int DIM; // problem dimension, set in minimize()
int m_historySize = 5; // number of (s, y) correction pairs kept
/**
* @brief sort pairs (k,v) according v ascending
* @details returns the pairs' first fields ordered by ascending second field.
* NOTE(review): the comparator indexes v by the values stored in idx, which
* assumes v[i].first == i (true for the caller below) -- confirm if reused.
*
* @param v [description]
* @return [description]
*/
std::vector<int> sort_indexes(const std::vector< std::pair<int, Scalar> > &v) {
std::vector<int> idx(v.size());
for (size_t i = 0; i != idx.size(); ++i)
idx[i] = v[i].first;
sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) {
return v[i1].second < v[i2].second;
});
return idx;
}
/**
* @brief Algorithm CP: Computation of the generalized Cauchy point
* @details PAGE 8
*
* @param c [description]
*/
void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, const TVector &g, TVector &x_cauchy, VariableTVector &c) {
// NOTE(review): this local DIM shadows the member DIM (they should be equal).
const int DIM = x.rows();
// Given x,l,u,g, and B = \theta I-WMW
// {all t_i} = { (idx,value), ... }
// TODO: use "std::set" ?
std::vector<std::pair<int, Scalar> > SetOfT;
// the feasible set is implicitly given by "SetOfT - {t_i==0}"
TVector d = -g;
// n operations
// For each coordinate, the breakpoint t_i at which x - t*g hits a bound.
for (int j = 0; j < DIM; j++) {
if (g(j) == 0) {
SetOfT.push_back(std::make_pair(j, std::numeric_limits<Scalar>::max()));
} else {
Scalar tmp = 0;
if (g(j) < 0) {
tmp = (x(j) - problem.upperBound()(j)) / g(j);
} else {
tmp = (x(j) - problem.lowerBound()(j)) / g(j);
}
SetOfT.push_back(std::make_pair(j, tmp));
if (tmp == 0) d(j) = 0;
}
}
// sortedindices [1,0,2] means the minimal element is on the 1-st entry
std::vector<int> sortedIndices = sort_indexes(SetOfT);
x_cauchy = x;
// Initialize
// p := W^Scalar*p
VariableTVector p = (W.transpose() * d); // (2mn operations)
// c := 0
c = VariableTVector::Zero(W.cols());
// f' := g^Scalar*d = -d^Td
Scalar f_prime = -d.dot(d); // (n operations)
// f'' := \theta*d^Scalar*d-d^Scalar*W*M*W^Scalar*d = -\theta*f' - p^Scalar*M*p
Scalar f_doubleprime = (Scalar)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
// Clamp the curvature away from zero to keep dt_min finite.
f_doubleprime = std::max<Scalar>(std::numeric_limits<Scalar>::epsilon(), f_doubleprime);
Scalar f_dp_orig = f_doubleprime;
// \delta t_min := -f'/f''
Scalar dt_min = -f_prime / f_doubleprime;
// t_old := 0
Scalar t_old = 0;
// b := argmin {t_i , t_i >0}
int i = 0;
for (int j = 0; j < DIM; j++) {
i = j;
if (SetOfT[sortedIndices[j]].second > 0)
break;
}
int b = sortedIndices[i];
// see below
// t := min{t_i : i in F}
Scalar t = SetOfT[b].second;
// \delta Scalar := t - 0
Scalar dt = t ;
// examination of subsequent segments
// Walk the breakpoints in ascending order until the piecewise-quadratic
// model's minimizer falls inside the current segment.
while ((dt_min >= dt) && (i < DIM)) {
if (d(b) > 0)
x_cauchy(b) = problem.upperBound()(b);
else if (d(b) < 0)
x_cauchy(b) = problem.lowerBound()(b);
// z_b = x_p^{cp} - x_b
Scalar zb = x_cauchy(b) - x(b);
// c := c +\delta t*p
c += dt * p;
// cache
VariableTVector wbt = W.row(b);
f_prime += dt * f_doubleprime + (Scalar) g(b) * g(b) + (Scalar) theta * g(b) * zb - (Scalar) g(b) *
wbt.transpose() * (M * c);
f_doubleprime += (Scalar) - 1.0 * theta * g(b) * g(b)
- (Scalar) 2.0 * (g(b) * (wbt.dot(M * p)))
- (Scalar) g(b) * g(b) * wbt.transpose() * (M * wbt);
f_doubleprime = std::max<Scalar>(std::numeric_limits<Scalar>::epsilon() * f_dp_orig, f_doubleprime);
p += g(b) * wbt.transpose();
d(b) = 0;
dt_min = -f_prime / f_doubleprime;
t_old = t;
++i;
if (i < DIM) {
b = sortedIndices[i];
t = SetOfT[b].second;
dt = t - t_old;
}
}
dt_min = std::max<Scalar>(dt_min, (Scalar)0.0);
t_old += dt_min;
// Remaining (still-free) coordinates move along d for the final step length.
#pragma omp parallel for
for (int ii = i; ii < x_cauchy.rows(); ii++) {
x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
}
c += dt_min * p;
}
/**
* @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
* @details [long description]
*
* @param FreeVariables [description]
* @return [description]
*/
Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector<int> &FreeVariables) {
Scalar alphastar = 1;
const unsigned int n = FreeVariables.size();
assert(du.rows() == n);
// NOTE(review): du(i) == 0 falls into the else branch and divides by
// zero -- confirm whether du is guaranteed nonzero for free variables.
for (unsigned int i = 0; i < n; i++) {
if (du(i) > 0) {
alphastar = std::min<Scalar>(alphastar, (problem.upperBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
} else {
alphastar = std::min<Scalar>(alphastar, (problem.lowerBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
}
}
return alphastar;
}
/**
* @brief solving unbounded probelm
* @details minimizes the quadratic model over the variables that are
* strictly inside their bounds at the Cauchy point (direct primal method).
*
* @param SubspaceMin [description]
*/
void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g,
TVector &SubspaceMin) {
Scalar theta_inverse = 1 / theta;
// Free variables: not pinned to either bound at the Cauchy point.
std::vector<int> FreeVariablesIndex;
for (int i = 0; i < x_cauchy.rows(); i++) {
if ((x_cauchy(i) != problem.upperBound()(i)) && (x_cauchy(i) != problem.lowerBound()(i))) {
FreeVariablesIndex.push_back(i);
}
}
const int FreeVarCount = FreeVariablesIndex.size();
MatrixType WZ = MatrixType::Zero(W.cols(), FreeVarCount);
for (int i = 0; i < FreeVarCount; i++)
WZ.col(i) = W.row(FreeVariablesIndex[i]);
// Reduced gradient of the model at the Cauchy point.
TVector rr = (g + theta * (x_cauchy - x) - W * (M * c));
// r=r(FreeVariables);
MatrixType r = MatrixType::Zero(FreeVarCount, 1);
for (int i = 0; i < FreeVarCount; i++)
r.row(i) = rr.row(FreeVariablesIndex[i]);
// STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
VariableTVector v = M * (WZ * r);
// STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
MatrixType N = theta_inverse * WZ * WZ.transpose();
// N = I - MN
N = MatrixType::Identity(N.rows(), N.rows()) - M * N;
// STEP: 5
// v = N^{-1}*v
if (v.size() > 0)
v = N.lu().solve(v);
// STEP: 6
// HERE IS A MISTAKE IN THE ORIGINAL PAPER!
VariableTVector du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
// STEP: 7
Scalar alpha_star = findAlpha(problem, x_cauchy, du, FreeVariablesIndex);
// STEP: 8
VariableTVector dStar = alpha_star * du;
SubspaceMin = x_cauchy;
for (int i = 0; i < FreeVarCount; i++) {
SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
}
}
public:
// Set how many (s, y) correction pairs the limited-memory model keeps.
void setHistorySize(const int hs) { m_historySize = hs; }
// Minimize 'problem' starting from x0; the solution is written back into x0.
void minimize(TProblem &problem, TVector &x0) {
if(!problem.isValid(x0))
std::cerr << "start with invalid x0" << std::endl;
DIM = x0.rows();
theta = 1.0;
W = MatrixType::Zero(DIM, 0);
M = MatrixType::Zero(0, 0);
MatrixType yHistory = MatrixType::Zero(DIM, 0);
MatrixType sHistory = MatrixType::Zero(DIM, 0);
TVector x = x0, g = x0;
//std::cout << "[*] CALL 1\n";
Scalar f = problem.value(x);
problem.gradient(x, g);
// conv. crit.
// Converged when the projected-gradient step's infinity norm drops below 1e-4.
auto noConvergence =
[&](TVector &x, TVector &g)->bool {
return (((x - g).cwiseMax(problem.lowerBound()).cwiseMin(problem.upperBound()) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
};
this->m_current.reset();
this->m_status = Status::Continue;
while (problem.callback(this->m_current, x) && noConvergence(x, g) && (this->m_status == Status::Continue)) {
Scalar f_old = f;
TVector x_old = x;
TVector g_old = g;
// STEP 2: compute the cauchy point
TVector CauchyPoint = TVector::Zero(DIM);
VariableTVector c = VariableTVector::Zero(W.cols());
getGeneralizedCauchyPoint(problem, x, g, CauchyPoint, c);
// STEP 3: compute a search direction d_k by the primal method for the sub-problem
TVector SubspaceMin;
SubspaceMinimization(problem, CauchyPoint, x, c, g, SubspaceMin);
// STEP 4: perform linesearch and STEP 5: compute gradient
Scalar alpha_init = 1.0;
//const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init);
//static Scalar linesearch(const TVector &x, const TVector &searchDir, ProblemType &objFunc, const Scalar alpha_init = 1.0)
std::cout << "\n[*] MORETHUENTE.h linesearch() \t " << (x.array().exp()).matrix().transpose() << "\n\n";
//Scalar fval = objFunc.value(x);
//TVector g = x.eval();
//objFunc.gradient(x, g);
TVector s = (SubspaceMin-x).eval();
//TVector xx = x.eval();
// cvsrch updates f, g and alpha_init in place (More-Thuente line search).
MoreThuente<TProblem, 1>::cvsrch(problem, x, f, g, alpha_init, s);
const Scalar rate = alpha_init;
//std::cout << "\nLINE SEARCH - rate = " << rate << "\tSUBSPACEMIN = " << SubspaceMin.transpose() << std::endl;
// update current guess and function information
//std::cout << "OLD X = " << (x.array().exp()).matrix().transpose() << "\n";
x = x - rate*(x-SubspaceMin);
std::cout << "\n[*] CALL 2\t" << (x.array().exp()).matrix().transpose() << "\n\n";
f = problem.value(x);
// update the fDelta convergence status
this->m_current.fDelta = std::abs(f-f_old);
problem.gradient(x, g);
// prepare for next iteration
TVector newY = g - g_old;
TVector newS = x - x_old;
// STEP 6:
// Accept the correction pair only if the curvature condition holds.
Scalar test = newS.dot(newY);
test = (test < 0) ? -1.0 * test : test;
if (test > 1e-7 * newY.squaredNorm()) {
if (yHistory.cols() < m_historySize) {
yHistory.conservativeResize(DIM, yHistory.cols() + 1);
sHistory.conservativeResize(DIM, sHistory.cols() + 1);
} else {
// History full: drop the oldest column (shift left).
yHistory.leftCols(m_historySize - 1) = yHistory.rightCols(m_historySize - 1).eval();
sHistory.leftCols(m_historySize - 1) = sHistory.rightCols(m_historySize - 1).eval();
}
yHistory.rightCols(1) = newY;
sHistory.rightCols(1) = newS;
// STEP 7:
// Rebuild theta, W and M of the compact representation.
theta = (Scalar)(newY.transpose() * newY) / (newY.transpose() * newS);
W = MatrixType::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
W << yHistory, (theta * sHistory);
MatrixType A = sHistory.transpose() * yHistory;
MatrixType L = A.template triangularView<Eigen::StrictlyLower>();
MatrixType MM(A.rows() + L.rows(), A.rows() + L.cols());
MatrixType D = -1 * A.diagonal().asDiagonal();
MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
M = MM.inverse();
}
if (fabs(f_old - f) < 1e-8) {
// successive function values too similar
break;
}
++this->m_current.iterations;
this->m_current.gradNorm = g.norm();
std::cout << "\n-------------------------------- CONVERGENCE CHECK --------------------------------\n";
std::cout << "Current fDelta = " << this->m_current.fDelta << std::endl;
this->m_status = checkConvergence(this->m_stop, this->m_current);
}
x0 = x;
if (this->m_debug > DebugLevel::None) {
std::cout << "Stop status was: " << this->m_status << std::endl;
std::cout << "Stop criteria were: " << std::endl << this->m_stop << std::endl;
std::cout << "Current values are: " << std::endl << this->m_current << std::endl;
}
}
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H_ */
|
main.c | /*
* spike.c
* Spike
*
* Created by Ben Evans on 19/06/2008.
* Copyright 2008 University of Oxford. All rights reserved.
*
*/
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#ifdef SERIAL
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pwd.h>
#include <dlfcn.h>
#include "utils.h"
#include "globals.h"
#include "parameters.h"
#include "read_parameters.h"
/* Entry point of the simulation core (defined elsewhere). */
extern int spike(PARAMS * mp);
/* Default file/directory names; the macros presumably come from
 * parameters.h / globals.h -- TODO confirm. */
char * RESDIR = RESULTSDIRECTORY;
char * DPFILE = DEFAULTPFILE;
char * MPFILE = OUTPUTPFILE;
char * IMGDIR = IMGDIRECTORY;
char * IPFILE = IMGPARAMFILE;
char STFILE[BUFSIZ] = ""; // = STIMULIFILE; //char * STFILE = NULL;
char PPSTFILE[BUFSIZ] = "";
#define GSLDIR "/opt/local/lib/" // Unnecessary? See checks below
// otool -L Spike // to test which dynamic libraries are being used
// http://discussions.apple.com/thread.jspa?threadID=1741520
// http://www.cprogramming.com/tutorial/shared-libraries-linux-gcc.html
// Apple's malloc debugging library: libgmalloc
// To use: export DYLD_INSERT_LIBRARIES=/usr/lib/libgmalloc.dylib
/* Global model-parameter structure, allocated and filled in main(). */
PARAMS * mp;
/* Master RNG seed plus per-thread GSL RNG states (one per OpenMP thread). */
unsigned long int seed = 0;
gsl_rng * mSeed = NULL;
gsl_rng ** states = NULL;
int nThreads = 1; /* number of OpenMP threads in use (1 when serial) */
char *pFile = NULL; /* parameter file name supplied via the -f option */
int main (int argc, const char * argv[])
{
//int set_error = 0;
bool rerun = false;
bool compress = true;
bool seedFlag = false;
int hours = 0;
int mins = 0;
char *imageArchive = NULL;
//char *sfile = NULL;
char *cpfile = "CLIparams.m";
FILE * cli_FP = NULL;
FILE * pipeFP = NULL;
char syscmd[BUFSIZ]; // stdio.h : 1024
char dlver[BUFSIZ];
char *bptr = NULL;
int th = 0;
float proportion = 1.0;
#ifdef _OPENMP
bool dynamic = false;
#endif
bool genNewSeed = false;
bool p_flag = false; // Parameter (CLI) flag
bool pf_flag = false; // Parameter file flag
bool ia_flag = false; // Image archive flag
int pINcount = 0;
int dcount = 0; // Default parameter count
int fcount = 0; // File parameter count
int pcount = 0; // Parameter count
int ocount = 0; // Parameters out count
int err = 0;
int slen = 0;
char * error = NULL;
void * dylib = NULL;
bool recFlag = false;
char * recList = NULL;
bool skip_arg = false;
//double wtime, begin;
char timeStr[FNAMEBUFF];
int c, result;
struct tm *ts;
char hostname[FNAMEBUFF];
char schedule[BUFSIZ];
strncpy(schedule, "<default>", BUFSIZ);
printf("--------------------------------------------------------------------------------\n");
time_t now = time(NULL);
ts = localtime(&now);
strftime(timeStr, FNAMEBUFF, "%a %d/%b/%Y %H:%M:%S", ts);
err = gethostname(hostname, FNAMEBUFF);
//char *user = getenv("USER");
struct passwd *userinfo = getpwuid(geteuid());
char *user = userinfo->pw_name;
if (user && err != -1)
printf("[%s] : Program started by %s@%s\n", timeStr, user, hostname);
char cwd[BUFSIZ];
if (getcwd(cwd, sizeof(cwd)) != NULL)
fprintf(stdout, "DIR: %s\n", cwd); // Print directory
else
perror("getcwd() error");
// Move this section to a seperate header e.g. compiler.h
#ifdef _OPENMP
#define OMP "T"
#else
#define OMP "F"
#endif
#ifdef __GNUC__ // N.B. __GNUC__ is for any compiler implementing GNU compiler extensions, hence is defined for clang and llvm-gcc
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#ifdef __llvm__ // Using LLVM backend
// http://clang.llvm.org/docs/LanguageExtensions.html
//printf("%d\n",__COUNTER__);
#ifdef __clang__ // Using Clang-LLVM
// For a list of builtin defines type: clang -x c /dev/null -dM -E
printf("Compiler: Clang-LLVM %s\n", __clang_version__);
#else // Using GCC-LLVM
printf("Compiler: GCC-LLVM %s\n", __VERSION__);
#endif
// Time of last modification of current source file...
printf("Compiled on: %s | Optimization: %d | Debug: %d | OpenMP: %s\n", \
__TIMESTAMP__, __OPTIMIZE__, DEBUG, OMP);
#if __has_feature(c_static_assert) // Working? Relevent?
printf("Includes support for compile-time assertions\n");
#else
fprintf(stderr, "*** Warning: assert() disabled in parallel regions! ***\n");
#endif
#else // Using GCC
printf("Compiler: %s | Optimization: %d | Debug: %d | OpenMP: %s\n", \
__VERSION__, __OPTIMIZE__, DEBUG, OMP);
printf("Source modified on: %s\n",__TIMESTAMP__);
printf("Compiled on: %s at %s\n", __DATE__, __TIME__);
#endif
//#endif // TODO Check this is unnecessary
#ifdef NDEBUG
fprintf(stderr, "*** Warning: Executing without error checking! ***\n");
#endif
if (strcmp(user, "nobody")==0)
SIM.Xgrid = true;
else
if (getenv("OMP_SCHEDULE")) // not NULL string
strncpy(schedule, getenv("OMP_SCHEDULE"), BUFSIZ);
char * rsfile = RSFILE;
printf("--------------------------------------------------------------------------------\n");
printf("Checking for \"%s\" in current directory... \t\t\t [%s]\n",DPFILE,\
(file_exists(DPFILE))?"OK":"NO");
printf("Checking for \"%s\" in current directory... \t\t [%s]\n",rsfile,\
(file_exists(rsfile))?"OK":"NO");
// Check for GSL
dylib = dlopen(GSLDIR"libgsl.dylib",RTLD_NOW);
printf("Checking %s for GSL dyamic libraries... \t\t\t [%s]\n",GSLDIR,(dylib)?"OK":"NO");
if ((error = dlerror()) != NULL || !dylib)
exit_error("main: libgsl.dylib check", error);
else // dylib != NULL
dlclose(dylib);
// Check for System libraries
dylib = dlopen("libSystem.dylib", RTLD_NOW);
printf("Checking for System dynamic libraries... \t\t\t\t [%s]\n",(dylib)?"OK":"NO");
if ((error = dlerror()) != NULL || !dylib)
exit_error("main: libSystem.dylib check", error);
else // dylib != NULL
dlclose(dylib);
// Runtime OpenMP check using int omp_in_parallel(void);
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master//single
{
printf("Checking for OpenMP runtime parallelism... \t\t\t\t [%s]\n",\
omp_in_parallel()?"OK":"NO");
}
}
#endif
printf("--------------------------------------------------------------------------------\n");
char exec[BUFSIZ];
strncpy(exec, argv[0], sizeof(exec)-1);
if (argc==1)
{
printf("%s usage:\n",exec); //argv[0]);
printf("-c[lean]\t: Clean all dat and tbz files (including image archives!)\n");
printf("-f <filename>\t: Pass parameter filename\n");
printf("-r[erun]\t: Rerun simulation with the random seed in %s\n",rsfile);
printf("-g[enerate]\t: Generate new random seed in %s and exit\n",rsfile);
printf("-s <seed>\t: Explicitly pass random seed [0, (2^32)-1]\n");
printf("-k <record list>: Pass list of neurons to be recorded\n");
printf("-d[ynamic]\t: Set number of threads to be dynamic\n");
printf("-m <proportion>\t: Set number of threads to a proportion of cores [0.0, 1.0]\n");
printf("-t <threads>\t: Explicitly set the number of threads to use\n");
printf("-p <parameter>\t: Pass a parameter string <name>=<value>\n");
printf("--<parameter>\t: Pass a parameter string <name>=<value>\n");
//printf("-i[mage] <directory>\t: Pass directory of filtered images [***incomplete***]\n");
printf("-j <images>.tbz\t: Pass compressed image archive\n");
printf("-u[ncompressed]\t: Prevent data compression\n");
printf("-x[grid]\t: Set as an Xgrid simulation i.e. print progress information\n");
printf("================================================================================\n");
return 0;
}
#ifdef __APPLE__ //__unix__
// Output command passed to cmd.sh
int a=0;
cli_FP = myfopen("cmd.sh", "w");
fprintf(cli_FP, "#!/bin/bash\n");
for (a=0; a<argc; a++)
fprintf(cli_FP, "%s ",argv[a]);
fprintf(cli_FP, "\n");
fclose(cli_FP);
system("chmod 755 cmd.sh");
#endif
while (--argc > 0 && (*++argv)[0] == '-')
{
//skip_arg = 0;
while (!skip_arg && (c = *++argv[0]))
{
switch (c)
{
case 'c': // Clean directory of .dat and .tbz files
system("rm *.dat *.tbz");
break;
case 'f': // Parameter file name
pf_flag = true;
pFile = myalloc(strlen(*++argv)+1); //sizeof(char)==1 guaranteed
strcpy(pFile, *argv);
skip_arg = true;
argc--;
break;
case 'r': // Rerun with last seed
rerun = true; //RERUN = 1;
break;
case 'g': // Generate a new random seed and exit
genNewSeed = true;
break;
case 's': // Explicitly pass random seed (takes precedence over -r)
seed = atol(*++argv);
seedFlag = true;
skip_arg = true;
argc--;
break;
case 'k': // Read in list of neurons
fprintf(stderr, "*** -k: Specifying neurons for recording is not yet implemented! ***\n");
//int * recordSet = NULL;
recFlag = true;
slen = strlen(*++argv);
recList = myalloc(slen+1);
//char * tstr = NULL;
//int count = 0;
strncpy(recList, *argv, slen);
recList[slen] = '\0'; // NULL terminate last byte
//strtok(list, ";");
//while (tstr && (*trim(tstr) != ('[' || ']')) && (tstr != '\0'))
// count = parseIntVector(list, &recordSet);
skip_arg = true;
argc--;
break;
case 'd': // May reduce num_thd depending on system load
#ifdef _OPENMP
dynamic = true;
omp_set_dynamic(dynamic); // Can not be used with RNG
// if nCores > 4
// if nThreads > nCores-1 -> set nThreads = nCores - 2...
#else
fprintf(stderr, "*** -d: OpenMP disabled! ***\n");
#endif
break;
case 'm': // Set the proportion of threads from the CLI [0.0, 1.0]
#ifdef _OPENMP
proportion = atof(*++argv);
//#ifndef __llvm__
assert(0.0 < proportion && proportion <= 1.0);
//#endif
nThreads = round(omp_get_num_procs()*proportion);
nThreads = (nThreads>1) ? nThreads : 1;
omp_set_num_threads(nThreads);
#else
fprintf(stderr, "*** -m %f: OpenMP disabled! ***\n",proportion);
#endif
skip_arg = true;
argc--;
break;
case 't': // Set the number of threads from the CLI
#ifdef _OPENMP
nThreads = atoi(*++argv);
/*if (nThreads >= omp_get_num_procs())
omp_set_dynamic(true);
else
omp_set_num_threads(nThreads);*/
omp_set_num_threads(nThreads);
if (nThreads >= omp_get_num_procs())
printf("Warning: nThreads (%d) >= nProcessors (%d)!\n",\
nThreads, omp_get_num_procs());
#else
fprintf(stderr, "-t %d: OpenMP disabled\n",nThreads);
#endif
skip_arg = true;
argc--;
break;
case 'p': // Code to pass a parameter string e.g. "param=0"
if (!p_flag)
{
cli_FP = myfopen(cpfile, "w");
p_flag = true;
}
fprintf(cli_FP, "%s;\n", *++argv);
skip_arg = true;
argc--;
pINcount++;
break;
case '-': // Equivalent to '-p ' but combines the arguments
if (!p_flag)
{
cli_FP = myfopen(cpfile, "w");
p_flag = true;
}
fprintf(cli_FP, "%s;\n", ++argv[0]); // Advance to next char address
skip_arg = true;
//argc--;
pINcount++;
break;
case 'i': // Pass Image directory
break;
case 'j': // Alternatively pass compressed tar (cjvf) of images
ia_flag = true;
slen = strlen(*++argv);
imageArchive = myalloc(slen+1);
strncpy(imageArchive, *argv, slen);
imageArchive[slen] = '\0'; // NULL terminate last byte
skip_arg = true;
argc--;
break;
case 'u': // Keep data uncompressed
compress = false;
//printf("Warning: tbz archives should be removed to prevent analysis of them!\n");
break;
case 'x': // Xgrid simulation
SIM.Xgrid = true;
break;
default:
printf("Illegal arguement: %c\n", c);
argc = 0;
break;
}
if (skip_arg)
{
skip_arg = false;
break;
}
}
}
#ifdef _OPENMP
#pragma omp parallel //private (th_id)
{
//th_id = omp_get_thread_num();
nThreads = omp_get_num_threads(); //num_thd
#pragma omp single
{
printf("OMP: (%d/%d)\t{OMP_DYNAMIC=%s, OMP_NESTED=%s, OMP_SCHEDULE=%s}\n", \
nThreads, omp_get_num_procs(), \
(omp_get_dynamic() ? "TRUE" : "FALSE"), \
(omp_get_nested() ? "TRUE" : "FALSE"), \
(schedule));
}
}
#else
nThreads = 1;
printf("Executing in serial.\n");
#endif
FILE * randSeedFP;
char * sString, buffer[BUFSIZ];
unsigned long long seedMod = pow(2, 32); // 32-bit unsigned seeds (max value = pow(2, 32)-1)
// printf("This program is compiled with GSL version %s.\n", GSL_VERSION);
//#if DEBUG > 1 // Print GSL verison and location
// system("gsl-config --prefix --version");
//#endif
//if (!SIM.Xgrid)
//{
// Print GSL verison and location
if ((pipeFP = popen("/opt/local/bin/gsl-config --prefix --version", "r")))
{
fgets(syscmd, sizeof(syscmd)-1, pipeFP);
fgets(dlver, sizeof(dlver)-1, pipeFP);
}
pclose(pipeFP);
if ((bptr = strpbrk(syscmd, "\r\n"))) //strstr(syscmd, '\n')
*bptr = '\0';
printf("GSL: Compiled with v%s, found dynamic libraries v%4.2f at: %s\n", \
GSL_VERSION,atof(dlver),syscmd);
// if atof(dlver) < GSL_MIN
//}
// Initialise random seed
const gsl_rng_type * T = gsl_rng_default; // Set RNG type
gsl_rng_env_setup(); // http://www.gnu.org/software/gsl/manual/html_node/Random-number-environment-variables.html
mSeed = gsl_rng_alloc(T); // Used for serial sections with randomness
if (genNewSeed) // Generate a new random seed file and exit
{
seed = (unsigned long) time((time_t *) NULL);
seed %= seedMod;
randSeedFP = myfopen(rsfile, "w");
fprintf(randSeedFP, "mSeed: \t%ld\n", seed);
fclose(randSeedFP);
printf("New seed generated in %s: %ld (%s) <GSL v%s>\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION); // Generator type not strictly necessary here
return 0;
}
if (!seedFlag)
{
if (rerun) // Also rerun with parameters.m?
{
randSeedFP = myfopen(rsfile, "r");
if ((sString = fgets(buffer, sizeof(buffer), randSeedFP)) != NULL) //while
seed = atol(strrchr(sString,':')+1); //ans[count++]
fclose(randSeedFP);
printf("Rerunning simulation with %s: %ld (%s) <GSL v%s>\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION);
}
else
{
seed = (unsigned long) time((time_t *) NULL);
seed %= seedMod;
fprintf(stderr, "*** Warning: Creating new seed in %s: %ld (%s) <GSL v%s> ***\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION);
randSeedFP = myfopen(rsfile, "w");
fprintf(randSeedFP, "mSeed: \t%ld\n", seed);
fclose(randSeedFP);
}
}
gsl_rng_set(mSeed, seed); //gsl_rng_set(mSeed, -idum);
// Allocate and initialise model parameters structure
mp = myalloc(sizeof(*mp)); // Place in getParameters with default init?
mp->initialised = false;
mp->imgList = NULL;
mp->LvExcit = mp->LvInhib = mp->LpEfE = mp->LpElE = mp->LpEI = mp->LpIE = mp->LpII = 0;
mp->vExcit = mp->vInhib = mp->vScales = mp->vOrients = mp->vPhases = NULL;
mp->pCnxEfE = mp->pCnxElE = mp->pCnxIE = mp->pCnxEI = mp->pCnxII = NULL;
mp->layDim = NULL;
mp->vSquare = mp->vRecords = NULL;
mp->rInhib = 0.0;
int syserr = 0;
if (ia_flag) //assert(mp->useFilteredImages);
{
FILEPARTS * fp = myalloc(sizeof(*fp));
getFileParts(imageArchive, fp);
slen = strlen(fp->fname);
mp->imgDir = myalloc(slen+1);
strncpy(mp->imgDir, fp->fname, slen);
mp->imgDir[slen] = '\0'; // NULL terminate last byte
assert(file_exists(imageArchive));
if (snprintf(syscmd, BUFSIZ, "mkdir %s", mp->imgDir) >= BUFSIZ)
fprintf(stderr, "Warning! Undersized buffer: %s", syscmd);
if((syserr = system(syscmd))==0)
{
printf("Now extracting %s...\t", imageArchive);
if(snprintf(syscmd, BUFSIZ, "tar -xf %s -C %s/",imageArchive, mp->imgDir) >= BUFSIZ)
fprintf(stderr, "*** Warning! Undersized buffer: %s ***", syscmd);
syserr = system(syscmd);
if (syserr)
EE("Error extracting image archive");
else
printf("Images successfully extracted to %s\n", mp->imgDir);
}
}
SIM.minTau = BIG;
// Read in parameters from .m file
printf("I/O: Processing parameters: \"%s\"", !pFile ? DPFILE : pFile);
if (p_flag)
fclose(cli_FP);
dcount = read_parameters(mp, DPFILE);
fcount = (pFile != NULL) ? read_parameters(mp, pFile) : 0;
pcount = (p_flag) ? read_parameters(mp, cpfile) : 0;
if (!mp->useFilteredImages)
assert(pcount == pINcount);
//printf(" {%d,%d,%d}\tParsing complete!\n", dcount, fcount, pcount);
// Print parameters to MPFILE (parameters.m)
ocount = printParameters(mp, MPFILE); // Variables to read into Matlab
//printf("%d parameters written to %s\n", pcount, MPFILE);
printf(" {%d,%d,%d} --> \"%s\" {%d} Done!\n", dcount, fcount, pcount, MPFILE, ocount);
// Create a random seed for each thread to ensure thread safety
if (mp->noise) // Could add a state to every neuron to achieve same results with different threads
{
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
{
omp_set_dynamic(false); // Do not adjust number of threads according to system load
}
}
#endif
states = myalloc(nThreads * sizeof(**states));
for (th=0; th<nThreads; th++)
{
states[th] = gsl_rng_alloc(T);
gsl_rng_set(states[th], seed+th+1);
}
}
if (recFlag) // Make recordSet global, prevent random choice in init_network and free at the end
{
/*int ** recordSet = myalloc(mp->nLayers * sizeof(*recordSet));
char * tstr = NULL;
int count = 0;
strtok(list, ";");
while (tstr && (*trim(tstr) != ']') && (tstr != '\0'))
for (l=0; l<mp->nLayers; l++)
{
mp->vRecords[l] = parseIntVector(list, &recordSet[l]);
tstr = strtok(NULL, ";");
}*/
myfree(recList);
}
// Print minimum tau and DT to nearest microsecond
printf("TAU: Smallest time constant = %.3f ms | DT = %.3f ms\n", SIM.minTau*1000, mp->DT*1000);
if (mp->DT >= 2*SIM.minTau) // CHECK THIS
fprintf(stderr, "*** Warning: Forward Euler stability condition violated! ***\n");
assert(mp->DT <= 0.001); // Timesteps must be no larger than 1 ms or mp->TSperMS==0!
// Display dynamic libraries: otool -L ~/bin/SpikeNet/Debug/Spike
#ifdef _OPENMP // Use omp function omp_get_wtime
//double begin = omp_get_wtime();
SIM.start = omp_get_wtime();
SIM.elapsed = 0.0;
#else
time_t start = time(NULL);
#endif
if (!SIM.Xgrid && !mp->loadWeights) // Remove *.dat and *.tbz
{
system("rm *.dat"); //system("rm *.dat *.tbz");
if (ia_flag)
{
if(snprintf(syscmd, BUFSIZ, "find *.tbz ! -name %s -delete",imageArchive) >= BUFSIZ)
fprintf(stderr, "*** Warning! Undersized buffer: %s ***", syscmd);
if (system(syscmd)) // Delete *.tbz except image archive
printf("Archive files successfully cleaned!\n");
else
EE("Error cleaning archive files!"); //exit_error("main.c", "Error cleaning archive files!\n");
}
else
system("rm *.tbz"); // Delete *.tbz
}
if (mp->loadWeights)
{
// Pass an archive with all relevant dat files with CLI flag e.g. network.tbz
const char * suffix = "";
char fname[FNAMEBUFF];
slen = snprintf(fname, FNAMEBUFF, "L0affNeuronsElE%s.dat", suffix);
assert(slen < FNAMEBUFF);
if (!file_exists(fname))
{
if (file_exists("connectivity.tbz"))
system("tar -xvf connectivity.tbz");
else
EE("No connectivity files to load"); //exit_error("main", "No connectivity files to load");
}
if (mp->nLayers > 1)
{
slen = snprintf(fname, FNAMEBUFF, "L1affNeuronsEfE%s.dat", suffix);
assert(slen < FNAMEBUFF);
if (!file_exists(fname))
{
if (file_exists("postTraining.tbz"))
system("tar -xvf postTraining.tbz");
else
EE("No weights files to load"); //exit_error("main", "No weights files to load");
}
}
}
/***** RUN SIMULATION *****/
result = spike(mp);
/**************************/
// Compress data files for crash-free xgrid! '-j' Uses bzip (*.tbz equivalent to *.tar.bz2)
// Append files to fileList and call system(syscmd); once and keep fileList
//snprintf(syscmd, BUFSIZ, "tar -cjvf %s.tbz %s > fileList","connectivity","*affNeurons.dat");
/*if (!SIM.Xgrid) // /sbin/md5
system("md5 *.dat > datHashs.txt");*/
system("shasum *.dat > datHashs.txt"); // /usr/bin/shasum
/*snprintf(syscmd, BUFSIZ, "xargs rm < fileList");*/
//--remove-files (remove files after adding them to the archive) : only 10.5
// Check that system() returned 0 (no errors) Bash: echo $?
#pragma omp parallel sections private(syserr) // Experimental!
{
#pragma omp section
{
if (compress)
{
printf("\tCompressing data to .tbz archives...\t");
fflush(stdout);
if (!(mp->useFilteredImages || mp->stimGroups))
if ((syserr = system("tar -cjf stimuli.tbz *stimuli.dat stimuli.m")) == 0)
system("tar -tf stimuli.tbz | xargs rm");
if(mp->nRecordsPL)
{
if (mp->priorPhases)
if ((syserr = system("tar -cjf PPrecords.tbz R*PP_*.dat")) == 0)
system("tar -tf PPrecords.tbz | xargs rm");
if ((syserr = system("tar -cjf records.tbz R*.dat")) == 0)
system("tar -tf records.tbz | xargs rm");
}
if (mp->printConnections)
{
if (snprintf(syscmd, BUFSIZ, "tar -cjf connectivity.tbz *affNeurons*.dat %s %s",(mp->SOM)?"*dist*.dat":"",(mp->axonDelay)?"*affDelays*.dat":"") >= BUFSIZ)
fprintf(stderr, "Warning! Undersized buffer: %s", syscmd);
syserr = system(syscmd);
/*if (mp->SOM)
syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat *affDelays*.dat *dist*.dat");
else
{
if (mp->axonDelay)
syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat *affDelays*.dat"); //system("tar --remove-files -cjvf connectivity.tbz *affNeurons*.dat > fileList");
else
syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat");
}*/
if (!syserr)
system("tar -tf connectivity.tbz | xargs rm");
}
if (mp->pretrain)
{
if (mp->priorPhases)
if ((syserr = system("tar -cjf PPpreTraining.tbz PP_pt*.dat")) == 0)
system("tar -tf PPpreTraining.tbz | xargs rm");
if ((syserr = system("tar -cjf preTraining.tbz pt*.dat")) == 0)
system("tar -tf preTraining.tbz | xargs rm");
}
if (mp->train)
{
if (mp->priorPhases)
if ((syserr = system("tar -cjf PPtraining.tbz PP_E*.dat")) == 0) // 2> tar_err
system("tar -tf PPtraining.tbz | xargs rm");
if ((syserr = system("tar -cjf training.tbz E*.dat")) == 0) // 2> tar_err
system("tar -tf training.tbz | xargs rm");
}
if (mp->priorPhases)
if ((syserr = system("tar -cjf PPpostTraining.tbz PP_*.dat")) == 0) // 2> tar_err
system("tar -tf PPpostTraining.tbz | xargs rm");
if ((syserr = system("tar -cjf postTraining.tbz L*Spikes.dat L*weights*.dat")) == 0)
system("tar -tf postTraining.tbz | xargs rm");
//system(syscmd);
//system("rm fileList");
printf("Data Compressed!\n");
fflush(stdout);
}
//#pragma omp section
/*if (!SIM.Xgrid) // Print md5 #'s // /sbin/md5
{
//system("md5 Spike");
system("md5 parameters.m"); // shasum
system("md5 datHashs.txt");
//system("md5 *.tbz"); // Contains metadata (e.g. timestamps) which will give different #s
}*/
printf("Computing SHA checksums...\n");
slen = snprintf(syscmd, sizeof(syscmd)-1, "shasum %s", exec);
// To Do: Also print hashes for input files.
#ifndef __llvm__
assert(slen < (signed) sizeof(syscmd));
#endif
system(syscmd);
system("shasum parameters.m");
system("shasum datHashs.txt");
printf("Checksums computed!\n");
} // End of section
// Clean up
#pragma omp section
if (pf_flag)
myfree(pFile);
#pragma omp section
if (ia_flag)
{
myfree(imageArchive);
if(snprintf(syscmd, BUFSIZ, "rm -R %s/", mp->imgDir) >= BUFSIZ)
fprintf(stderr, "*** Warning! Undersized buffer: %s ***", syscmd);
system(syscmd); // Delete expand image files
}
// Print out input/output file list? array of structs with a bool and filename string...
#pragma omp section
{
gsl_rng_free(mSeed);
if (mp->noise)
{
for (th=0; th<nThreads; th++)
gsl_rng_free(states[th]); // Free all memory associated with generator
myfree(states);
}
}
//if (recFlag) // Free list of records
} // End of parallel sections
if (mp->useFilteredImages)
{
myfree(mp->imgDir);
myfree(mp->imgList);
myfree(mp->vScales);
myfree(mp->vOrients);
myfree(mp->vPhases);
}
myfree(mp->vRecords);
myfree(mp->vExcit);
myfree(mp->vInhib);
myfree(mp->pCnxEfE);
myfree(mp->pCnxElE);
myfree(mp->pCnxIE);
myfree(mp->pCnxEI);
myfree(mp->pCnxII);
myfree(mp->layDim);
myfree(mp->vSquare);
myfree(mp);
#ifdef _OPENMP
//getTimeString(timeStr, FNAMEBUFF, omp_get_wtime()-begin);
double wtime = omp_get_wtime() - SIM.start; //begin;
//double integral;
//double fraction = modf(wtime, &integral);
//duration = (time_t) round(integral);
hours = floor(wtime/3600);
wtime -= hours*3600;
mins = floor(wtime/60);
wtime -= mins*60; //secs = wtime - (mins*60) - (hours*3600);
snprintf(timeStr, FNAMEBUFF, "%d:%02d:%06.3lf (%d Threads)",\
hours,mins,wtime,nThreads);
#else
time_t duration = time(NULL) - start; // finish = round(time(NULL) - start);
hours = floor(duration/3600);
duration -= hours*3600;
mins = floor(duration/60);
duration -= mins*60;
int secs = duration;
snprintf(timeStr, FNAMEBUFF, "%d:%02d:%02d (Serial)",hours,mins,secs);
#endif
if (result==0)
printf("Simulation completed in %s!\n",timeStr);
else
{
fprintf(stderr, "*** Simulation aborted after %s! ***\n",timeStr);
return 1;
}
//printf("--------------------------------------------------------------------------------\n");
printf("================================================================================\n");
return 0;
}
|
target-data-2c.c | // ----------------------------------------------------------------------------------------
// Implementation of Example target.3c (Section 52.3, page 196) from OpenMP
// 4.0.2 Examples
// on the document http://openmp.org/mp-documents/openmp-examples-4.0.2.pdf
//
//
//
//
// ----------------------------------------------------------------------------------------
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
/* Problem size */
#define N 8192
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill A with an ascending half-step ramp and B with a descending
 * third-step ramp over the N-element problem domain. */
void init(DATA_TYPE *A, DATA_TYPE *B) {
  for (int idx = 0; idx < N; idx++) {
    A[idx] = idx / 2.0;
    B[idx] = ((N - 1) - idx) / 3.0;
  }
}
/* Re-seed A and B with integer ramps (ascending / descending); used
 * between the two multiply passes so the second pass sees new data. */
void init_again(DATA_TYPE *A, DATA_TYPE *B) {
  for (int idx = 0; idx < N; idx++) {
    A[idx] = idx;
    B[idx] = (N - 1) - idx;
  }
}
/* Serial reference version: C = A*B, then re-initialize A/B via
 * init_again and accumulate the second product into C. */
void vec_mult(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  for (int idx = 0; idx < N; idx++) {
    C[idx] = A[idx] * B[idx];
  }
  init_again(A, B);
  for (int idx = 0; idx < N; idx++) {
    C[idx] += A[idx] * B[idx];
  }
}
/* Offloaded version mirroring vec_mult. The outer `target data` region
 * keeps C resident on the device across both kernels and copies it back
 * to the host once (map(from)). A and B are NOT part of the outer data
 * environment, so each inner target region copies them to the device
 * afresh (map(to)) — the second kernel therefore sees the values written
 * by the host-side init_again call between the two targets. */
void vec_mult_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
#pragma omp target data map(from : C[ : N])
  {
#pragma omp target map(to : A[ : N], B[ : N])
#pragma omp parallel for
    for (int idx = 0; idx < N; idx++) {
      C[idx] = A[idx] * B[idx];
    }
    init_again(A, B); /* executes on the host, between target regions */
#pragma omp target map(to : A[ : N], B[ : N])
#pragma omp parallel for
    for (int idx = 0; idx < N; idx++) {
      C[idx] += A[idx] * B[idx];
    }
  }
}
/* Compare the two result vectors element-wise. Any element that is not
 * bitwise-equal is printed for debugging; the returned count includes
 * only elements whose percentDiff exceeds ERROR_THRESHOLD. */
int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) {
  int fail = 0;
  for (int idx = 0; idx < N; idx++) {
    if (B[idx] != B_GPU[idx]) {
      printf("DIFF @ %d![%f, %f]\n", idx, B[idx], B_GPU[idx]);
    }
    if (percentDiff(B[idx], B_GPU[idx]) > ERROR_THRESHOLD) {
      fail++;
    }
  }
  /* Summary line (same format as the rest of the benchmark suite). */
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
  return fail;
}
int main(int argc, char *argv[]) {
double t_start, t_end, t_start_OMP, t_end_OMP;
int fail = 0;
DATA_TYPE *A;
DATA_TYPE *B;
DATA_TYPE *C;
DATA_TYPE *C_OMP;
A = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
B = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
C = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
C_OMP = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
fprintf(stdout, ">> Two vector multiplication <<\n");
// initialize the arrays
init(A, B);
t_start_OMP = rtclock();
vec_mult_OMP(A, B, C_OMP);
t_end_OMP = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP); //);
#ifdef RUN_TEST
// initialize the arrays
init(A, B);
t_start = rtclock();
vec_mult(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //);
fail = compareResults(C, C_OMP);
free(A);
free(B);
free(C);
free(C_OMP);
#endif
return fail;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.