source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
sum2_int.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in second
// Return wall-clock time in seconds (sub-millisecond resolution).
// Uses C11 timespec_get: the previous ftime()/struct timeb interface is
// obsolete and was removed from POSIX.1-2008.
double read_timer() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}
//Create a matrix and a vector and fill with random numbers
// Fill the N-element vector X with pseudo-random values in roughly [0, 10].
void init(int *X) {
    // Hoist the constant divisor out of the loop; (int)(RAND_MAX/10.0)
    // matches the original per-element expression exactly.
    const int bucket = (int)(RAND_MAX / 10.0);
    for (int idx = 0; idx < N; idx++) {
        X[idx] = (int)rand() / bucket;
    }
}
//Our sum function- what it does is pretty straight-forward.
// Element-wise combine: answer[i] = X[i] + 2*Y[i] for all N elements,
// with a SIMD hint for vectorization. The return value is always 0 and
// is kept only for interface compatibility with callers.
int sum(int *X, int *Y, int *answer) {
    const int status = 0;
    #pragma omp simd
    for (int idx = 0; idx < N; idx++) {
        answer[idx] = 2 * Y[idx] + X[idx];
    }
    return status;
}
// Debug functions
// Serial reference implementation of sum(): answer[i] = X[i] + 2*Y[i].
// Used to validate the SIMD version; always returns 0.
int sum_serial(int *X, int *Y, int *answer) {
    int idx = 0;
    while (idx < N) {
        answer[idx] = 2 * Y[idx] + X[idx];
        idx++;
    }
    return 0;
}
// Print the first 8 elements of a vector in "[a b c ... ]" form.
// (Only a prefix is shown; the vectors are N elements long.)
void print_vector(int *vector) {
    printf("[");
    int pos = 0;
    while (pos < 8) {
        printf("%d ", vector[pos]);
        pos++;
    }
    puts("]");
}
// Compare the serial and SIMD result vectors.
// Returns 0 iff the vectors are identical.
int check(int *serial, int *SIMD) {
    // Accumulate ABSOLUTE differences: the old signed sum let mismatches
    // of opposite sign (+d and -d) cancel, so unequal vectors could
    // still report 0 ("correct").
    int diff = 0;
    for (int i = 0; i < N; i++) diff += abs(serial[i] - SIMD[i]);
    return diff;
}
// Benchmark driver: times the SIMD and serial versions of sum() over
// N_RUNS repetitions, prints a sample of the vectors, the GFLOPS rates,
// and a correctness check.
int main(int argc, char **argv) {
    // Set everything up.
    int *X = malloc(sizeof(int) * N);
    int *Y = malloc(sizeof(int) * N);
    int *answer = malloc(sizeof(int) * N);
    int *answer_serial = malloc(sizeof(int) * N);
    // The original never checked these allocations.
    if (!X || !Y || !answer || !answer_serial) {
        fprintf(stderr, "allocation failure\n");
        free(X); free(Y); free(answer); free(answer_serial);
        return 1;
    }
    srand(time(NULL));
    init(X);
    init(Y);
    // Time the SIMD kernel.
    double start = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        sum(X, Y, answer);
    double t = (read_timer() - start);
    // Time the serial reference.
    double start_serial = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        sum_serial(X, Y, answer_serial);
    double t_serial = (read_timer() - start_serial);
    printf("X: ");
    print_vector(X);
    puts("+");
    printf("Y: ");
    print_vector(Y);
    puts("=\n");
    printf("SIMD:\n");
    print_vector(answer);
    puts("---------------------------------");
    printf("Serial:\n");
    print_vector(answer_serial);
    // Each element costs one add and one multiply, so the kernel performs
    // 2*N ops per run.  The old formula multiplied by N twice, inflating
    // the reported GFLOPS by a factor of N.
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness:\t\t%d\n", check(answer_serial, answer));
    free(X);
    free(Y);
    free(answer);
    free(answer_serial);
    return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>   /* omp_get_max_threads() is called under _OPENMP below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize Y so that x->tv_usec - y->tv_usec is non-negative and
     * below one second.  NOTE: Y is modified in place. */
    if (x->tv_usec < y->tv_usec) {
        /* borrow whole seconds from Y's tv_sec */
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        /* push surplus microseconds into Y's tv_sec */
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* The subtraction is now safe: tv_usec is certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* 1 if the true difference X - Y is negative, else 0. */
    return x->tv_sec < y->tv_sec;
}
// Driver for the order-1 3D 7-point variable-coefficient stencil:
// allocates the two time planes and seven coefficient arrays, runs the
// stencil Nt-1 steps, TESTS times, and reports the best runtime.
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  // Grid dimensions (+2 for the one-cell halo) and time steps.
  // Defaults are applied when too few arguments are given -- previously
  // these were read uninitialized in that case (undefined behavior).
  int Nx = 32 + 2, Ny = 32 + 2, Nz = 32 + 2, Nt = 50;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A[plane][z][y][x], two time planes
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one coefficient array per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // Initialize BOTH planes over the FULL grid including the halo.
  // The stencil reads index 0 and plane A[1] on the first steps, so the
  // original loops (plane 0 only, starting at index 1) left values that
  // are read uninitialized -- undefined behavior.
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // MIN is the macro defined at the top of this file; the original
    // lowercase `min` is undeclared and fails to compile.
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays -- including the top-level pointer arrays and
  // the tile-size list, which previously leaked.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
GB_binop__bset_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int32)
// C=scalar+B GB (_bind1st__bset_int32)
// C=scalar+B' GB (_bind1st_tran__bset_int32)
// C=A+scalar GB (_bind2nd__bset_int32)
// C=A'+scalar GB (_bind2nd_tran__bset_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// Body is generated from the include template (file is auto-generated:
// do not hand-edit the logic).
void GB (_Cdense_ewise3_noaccum__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, sliced by B_ek_slicing.
// Returns GrB_NO_VALUE when this operator/type is disabled at compile time.
GrB_Info GB (_Cdense_accumB__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork)
// into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__bset_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner return above always fires;
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked by M / !M).  For eWiseUnion the
// alpha/beta scalars stand in for missing entries of A and B.
GrB_Info GB (_AaddB__bset_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the type-erased scalars (only valid for eWiseUnion)
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) with C sparse/hyper;
// logic lives in the included meta template.
GrB_Info GB (_AemultB_08__bset_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B with A sparse/hyper, B bitmap/full.
// GB_BITSET is non-commutative with no flipped variant, so flipxy is
// handled by compiling the template twice with GB_FLIPPED 0/1.
GrB_Info GB (_AemultB_02__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both
// A and B bitmap/full; work is sliced by M_ek_slicing.
GrB_Info GB (_AemultB_04__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with C held as a bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__bset_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the first argument:
// Cx[p] = GB_BITSET (x, Bx[p]) for every entry present per the Bb bitmap
// (Bb == NULL means all bnz entries are present).
GrB_Info GB (_bind1st__bset_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the second argument:
// Cx[p] = GB_BITSET (Ax[p], y) for every entry present per the Ab bitmap.
GrB_Info GB (_bind2nd__bset_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \
}
// C = op (x, A'): transpose A while applying the operator with the
// scalar bound first; GB_CAST_OP (defined just above) does the work
// inside the included transpose template.
GrB_Info GB (_bind1st_tran__bset_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \
}
// C = op (A', y): transpose A while applying the operator with the
// scalar bound second; GB_CAST_OP (redefined just above) does the work
// inside the included transpose template.
GrB_Info GB (_bind2nd_tran__bset_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
utilityGraphPartitioner.h | // ***********************************************************************
//
// Grappolo: A C++ library for graph clustering
// Mahantesh Halappanavar (hala@pnnl.gov)
// Pacific Northwest National Laboratory
//
// ***********************************************************************
//
// Copyright (2014) Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#ifndef _graph_partitioner_
#define _graph_partitioner_
/*
int METIS PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part)
nvtxs: The number of vertices in the graph.
ncon: The number of balancing constraints. It should be at least 1.
xadj, adjncy: The adjacency structure of the graph as described in Section 5.5.
vwgt (NULL): The weights of the vertices as described in Section 5.5.
vsize (NULL): The size of the vertices for computing the total communication volume as described in Section 5.7.
adjwgt (NULL): The weights of the edges as described in Section 5.5.
nparts The number of parts to partition the graph.
tpwgts (NULL): This is an array of size npartsncon that specifies the desired weight for each partition and constraint.
The target partition weight for the ith partition and jth constraint is specified at tpwgts[i*ncon+j]
(the numbering for both partitions and constraints starts from 0). For each constraint, the sum of the
tpwgts[] entries must be 1.0 (i.e., \Sum_i tpwgts[i*ncon + j] = 1:0).
A NULL value can be passed to indicate that the graph should be equally divided among the partitions.
ubvec (NULL): This is an array of size ncon that specifies the allowed load imbalance tolerance for each constraint.
For the ith partition and jth constraint the allowed weight is the ubvec[j]*tpwgts[i*ncon+j] fraction
of the jth’s constraint total weight. The load imbalances must be greater than 1.0.
A NULL value can be passed indicating that the load imbalance tolerance for each constraint should
be 1.001 (for ncon=1) or 1.01 (for ncon<1).
options (NULL):
This is the array of options as described in Section 5.4.
The following options are valid for METIS PartGraphRecursive:
METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE, METIS_OPTION_RTYPE,
METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS, METIS_OPTION_NITER,
METIS_OPTION_SEED, METIS_OPTION_UFACTOR, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
The following options are valid for METIS PartGraphKway:
METIS_OPTION_OBJTYPE, METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE,
METIS_OPTION_RTYPE, METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS,
METIS_OPTION_NITER, METIS_OPTION_UFACTOR, METIS_OPTION_MINCONN,
METIS_OPTION_CONTIG, METIS_OPTION_SEED, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
objval: Upon successful completion, this variable stores the edge-cut or the total communication volume of
the partitioning solution. The value returned depends on the partitioning’s objective function.
part: This is a vector of size nvtxs that upon successful completion stores the partition vector of the graph.
The numbering of this vector starts from either 0 or 1, depending on the value of
options[METIS OPTION NUMBERING].
Returns
METIS OK Indicates that the function returned normally.
METIS ERROR INPUT Indicates an input error.
METIS ERROR MEMORY Indicates that it could not allocate the required memory.
METIS ERROR Indicates some other type of error.
*/
extern "C" {
#include "metis.h"
}
using namespace std;
/*
#ifdef __cplusplus
extern "C" {
#endif
//Multilevel k-way Partitioning
int METIS_PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part);
#ifdef __cplusplus
}
#endif
*/
//METIS Graph Partitioner:
//METIS Graph Partitioner: partition graph G into numParts parts with
//METIS_PartGraphKway.  The part id of vertex v is written to
//VertexPartitioning[v]; the caller must supply at least NV entries.
void MetisGraphPartitioner( graph *G, long *VertexPartitioning, int numParts ) {
    printf("Within MetisGraphPartitioner(): \n");
    //numParts is an int: %d (the old %ld was a format/type mismatch -- UB)
    printf("Number of partitions requested: %d\n", numParts);
    //Get the iterators for the graph:
    long NV = G->numVertices;
    long NE = G->numEdges;
    long *vtxPtr = G->edgeListPtrs;
    edge *vtxInd = G->edgeList;
    printf("|V|= %ld, |E|= %ld \n", NV, NE);
    //Copy the CSR structure into METIS's idx_t arrays:
    idx_t nvtxs = (idx_t) NV;
    idx_t *xadj = (idx_t *) malloc ((NV+1) * sizeof(idx_t));
    assert(xadj != 0);
#pragma omp parallel for
    for(long i=0; i<=NV; i++) {
        xadj[i] = (idx_t) vtxPtr[i];
    }
    idx_t *adjncy = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjncy != 0);
#pragma omp parallel for
    for(long i=0; i<2*NE; i++) {
        adjncy[i] = (idx_t) vtxInd[i].tail;
    }
    idx_t *adjwgt = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjwgt != 0);
#pragma omp parallel for
    for(long i=0; i<2*NE; i++) {
        adjwgt[i] = (idx_t) vtxInd[i].weight;
    }
    idx_t nparts = (idx_t) numParts;
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT; //Edgecut minimization
    options[METIS_OPTION_CTYPE] = METIS_CTYPE_SHEM;    //Sorted heavy-edge matching
    options[METIS_OPTION_NUMBERING]= 0;                //C-style numbering, starting from 0
    //options[METIS_OPTION_NO2HOP]= 0; //Performs a 2-hop matching -- effective for power-law graphs
    //NOTE(review): NSEPS controls the number of separators computed for
    //orderings; confirm this is the intended knob for k-way partitioning.
    options[METIS_OPTION_NSEPS]= 10;
    //options[METIS_OPTION_UFACTOR] = 30;
    idx_t ncon = 1;   //Number of balancing constraints (at least 1)
    idx_t objval = 0; //Will contain the edgecut (or total communication)
    idx_t *part = (idx_t *) malloc (NV * sizeof(idx_t)); //Partition information
    assert(part != 0);
    //vwgt, vsize, tpwgts and ubvec are NULL: unit vertex weights, equal
    //target part weights, and the default imbalance tolerance.
    int returnVal = METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, adjwgt,
                                        &nparts, NULL, NULL, options, &objval, part);
    if(returnVal == METIS_OK)
        printf("Edge cut: %ld\n", (long) objval); //idx_t width is build-dependent; cast to match %ld
    else {
        if(returnVal == METIS_ERROR_MEMORY)
            printf("Metis could not allocate memory.\n");
        else
            printf("Metis error: %d\n", returnVal); //returnVal is an int
    }
    //Copy the result back.  The loop must stop at NV-1: part[] holds
    //exactly NV entries, so the old `i<=NV` bound read and wrote one
    //element past the end of both arrays.
#pragma omp parallel for
    for(long i=0; i<NV; i++) {
        VertexPartitioning[i] = (long) part[i]; //Do explicit typecasts
    }
    //Cleanup:
    free(xadj); free(adjncy); free(adjwgt);
    free(part);
    printf("Returning back from Metis\n");
}
#endif
|
pr27499.c | /* PR c/27499 */
/* { dg-do compile } */
extern void bar (unsigned int);
/* NOTE(review): compiler regression test for PR c/27499 -- it verifies
 * that an unsigned induction variable is accepted by
 * '#pragma omp parallel for'.  The loop shape IS the test; do not
 * restructure it. */
void
foo (void)
{
unsigned int i;
#pragma omp parallel for
for (i = 0; i < 64; ++i)
bar (i);
}
|
iwbt.c | //iwbt -- compute wet or icebult from air and dewpoint temperatures
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <errno.h>
#include <omp.h>
#include "envphys_c.h"
#include "envphys.h"
#define RH2O 461.5 /* Gas constant for water vapor (J/kg/K) */
#define EPS (MOL_H2O/MOL_AIR) /* Ratio of moleculr weights of water and dry air */
//#define CONVERGE 0.1 /* Convergence value */
// Compute the wet-bulb (or ice-bulb) temperature by Newton-Raphson
// iteration on the psychrometer equation, starting from ti = ta.
// Exits the process if the iteration fails to converge.
double wetbulb(
    double ta,    /* air temperature (K) */
    double dpt,   /* dewpoint temperature (K) */
    double press, /* total air pressure (Pa) */
    double tol)   /* wet_bulb tolerance threshold (K) */
{
    int i;
    double ea;     /* vapor pressure (Pa) */
    double esat;   /* saturation ea @ Ta (Pa) */
    double xlh;    /* latent heat of vaporization + fusion (sublimation) (J/kg) */
    double xlhv;   /* latent heat of vaporization (J/kg) */
    double xlhf;   /* latent heat of fusion (J/kg) */
    double fu_fac; /* fudge factor for xlh straddling 0 */
    double psyc;   /* Psychrometric "constant" (K/Pa) */
    double dedt;   /* Change in ea with temperature (Pa/K) */
    double pf;     /* Psychrometer value (K) */
    double dpdt;   /* Change in pf with temperature */
    double ti;     /* wet or ice bulb temperature (K) */
    double ti0;    /* previous iterate (K) */
    double dti;    /* closure value (K) */

    /* find latent heat of vaporization, or vaporization + fusion */
    if (ta <= FREEZE) {
        xlhv = LH_VAP((ta + dpt) / 2.0);
        xlhf = LH_FUS((ta + dpt) / 2.0);
        xlh = xlhv + xlhf;
    }
    else if (dpt <= FREEZE) {
        /* air above freezing but dewpoint below: weight the fusion term
         * by the fraction of the ta..dpt interval below freezing */
        xlhv = LH_VAP((ta + dpt) / 2.0);
        xlhf = LH_FUS((FREEZE + dpt) / 2.0);
        fu_fac = ((FREEZE - dpt) / (ta - dpt));
        xlh = xlhv + (fu_fac * xlhf);
    }
    else
        xlh = LH_VAP((ta+dpt)/2);

    /* vapor pressure and saturation vapor pressure at ta */
    ea = sati(dpt);
    esat = sati(ta);
    /* Psychrometric "constant" (K/Pa) */
    psyc = EPS * (xlh / (CP_AIR * press));

    /* solve for wet or ice bulb temperature.
     * BUG FIX: converge on |dti| <= tol.  The old test `dti > tol`
     * terminated as soon as an iterate overshot (dti < 0) even when the
     * solution was not yet within tolerance. */
    dti = 1.0;
    i = 0;
    ti = ta;
    while (fabs(dti) > tol) {
        ti0 = ti;
        if (ti != ta)
            esat = sati(ti);
        dedt = xlh * (esat / (RH2O * (ti*ti)));
        pf = (ti - ta) + (psyc * (esat - ea));
        dpdt = 1.0 + (psyc * dedt);
        ti = ti - (pf / dpdt);       /* Newton step */
        dti = ti0 - ti;
        i++;
        if (i > 10){
            /* diagnostics belong on stderr, with a newline */
            fprintf(stderr, "wetbulb: failure to converge in 10 iterations\n");
            exit(-1);
        }
    }
    return(ti);
}
//Function to calculate the wet bult temeprature of the whole image
// Compute the wet-bulb temperature for every pixel of an image in
// parallel with OpenMP.  Inputs ta/td are in Celsius; tw is returned in
// Celsius.  Air pressure is derived from elevation via the hydrostatic
// equation (sea-level pressure is used where z == 0).
void iwbt (
int ngrid, /* number of grid points */
double *ta, /* air temperature */
double *td, /* dew point temperature */
double *z, /* elevation */
int nthreads, /* number of threads for parallel processing */
double tol, /* wet_bulb tolerance threshold */
double *tw) /* wet bulb temperature (return) */
{
int samp;
double td_p; /* dew point temperature (C) */
double tw_p; /* wet bulb temperature (C) */
double ta_p; /* air temperature (C) */
double z_p; /* elevation (m) */
double pa_p; /* air pressure (pa) */
omp_set_dynamic(0); // Explicitly disable dynamic teams
omp_set_num_threads(nthreads); // Use N threads for all consecutive parallel regions
#pragma omp parallel shared(ngrid, ta, td, z) private(samp, ta_p, tw_p, z_p, pa_p, td_p)
{
#pragma omp for
for (samp=0; samp < ngrid; samp++) {
// get pixel values
ta_p = ta[samp];
td_p = td[samp];
z_p = z[samp];
/* set pa */
if (z_p == 0.0) {
pa_p = SEA_LEVEL;
}
else {
pa_p = HYSTAT (SEA_LEVEL, STD_AIRTMP, STD_LAPSE,
(z_p / 1000.0), GRAVITY, MOL_AIR);
}
/* convert ta & td to Kelvin */
ta_p += FREEZE;
td_p += FREEZE;
if(ta_p < 0 || td_p < 0){
// NOTE(review): calling exit() from inside an OpenMP parallel
// region terminates the whole process from one thread -- confirm
// this is the intended failure behavior.
printf("ta or td < 0 at pixel %i", samp);
exit(-1);
}
/* call wetbulb function & fill output buffer */
tw_p = wetbulb(ta_p, td_p, pa_p, tol);
// put back in array
tw[samp] = tw_p - FREEZE;
}
}
}
|
im2col_dnnlowp.h | #pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace math {
// Unfold an NCHW image (channels x height x width) into the im2col buffer
// data_col so convolution can be computed as a single GEMM.  Padded
// positions are filled with zero_point rather than literal 0, which keeps
// quantized (dnnlowp) tensors at their correct zero offset.
// data_col layout: (channels * kernel_h * kernel_w) x (output_h * output_w).
template <typename T>
static void Im2ColNCHW(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const T& zero_point = 0) {
// Spatial size of the convolution output.
const int output_h =
(height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h +
1;
const int output_w =
(width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
1;
// Fast path for zero padding and no dilation
// From Torch, THNN_(unfolded_copy)
if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 &&
pad_t == 0 && pad_b == 0) {
// k enumerates (channel, kernel_row, kernel_col) triples.
for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
const auto nip = k / (kernel_h * kernel_w);
const auto rest = k % (kernel_h * kernel_w);
const auto kh = rest / kernel_w;
const auto kw = rest % kernel_w;
auto* dst = data_col + nip * (kernel_h * kernel_w * output_h * output_w) +
kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
const auto* src = data_im + nip * (height * width);
for (auto y = 0; y < output_h; y++) {
const auto iy = y * stride_h + kh;
const auto ix = kw;
if (stride_w == 1) {
// Unit stride: copy a whole contiguous output row at once.
memcpy(
dst + (y * output_w),
src + (iy * width + ix),
sizeof(T) * output_w);
} else {
// Strided: copy one element at a time.
for (auto x = 0; x < output_w; x++) {
memcpy(
dst + (y * output_w + x),
src + (iy * width + ix + x * stride_w),
sizeof(T));
}
}
}
}
return;
}
// Fast path for equal padding
if (pad_l == pad_r && pad_t == pad_b) {
// From Intel, https://github.com/BVLC/caffe/pull/3536
const int pad_h = pad_t;
const int pad_w = pad_l;
const int channel_size = height * width;
// data_col is written strictly sequentially in this path.
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h + kernel_row * dilation_h;
for (int output_rows = output_h; output_rows; output_rows--) {
if (!utils::IsAGeZeroAndALtB(input_row, height)) {
// Entire output row falls in the vertical padding.
for (int output_cols = output_w; output_cols; output_cols--) {
*(data_col++) = zero_point;
}
} else {
int input_col = -pad_w + kernel_col * dilation_w;
for (int output_col = output_w; output_col; output_col--) {
if (utils::IsAGeZeroAndALtB(input_col, width)) {
*(data_col++) = data_im[input_row * width + input_col];
} else {
*(data_col++) = zero_point;
}
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
return;
}
// Baseline
// dkernel_*: effective (dilated) kernel extents.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int channels_col = channels * kernel_h * kernel_w;
for (int c = 0; c < channels_col; ++c) {
// Decompose the column-channel index into (image channel, kh, kw).
int w_offset = c % kernel_w;
int h_offset = (c / kernel_w) % kernel_h;
int c_im = c / kernel_h / kernel_w;
for (int h = 0; h < height_col; ++h) {
for (int w = 0; w < width_col; ++w) {
int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
data_col[(c * height_col + h) * width_col + w] =
data_im[(c_im * height + h_pad) * width + w_pad];
else
data_col[(c * height_col + h) * width_col + w] = zero_point;
}
}
}
}
// N-dimensional NCHW im2col: flattens X_data into the column buffer Y_data
// for arbitrary spatial rank N, writing zero_point into padded locations.
// img_shape[0] is the channel axis; img_shape[1..N] are the spatial dims.
template <typename T>
static void Im2ColNdNCHW(
const int N,
const int /* img_size*/,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const T* X_data,
T* Y_data,
CPUContext* /* context */,
const T& zero_point = 0) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
// d_offset: per-axis kernel offset for the current outer index.
// d_iter:   odometer over the output spatial positions.
std::vector<int> d_offset(N, 0);
std::vector<int> d_iter(N, 0);
for (int i = 0; i < outer_size; ++i) {
// Loop over spatial axes in reverse order to compute a per-axis offset.
int offset = i;
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset % kernel_shape[d_i];
offset /= kernel_shape[d_i];
}
for (int j = 0; j < inner_size; ++j) {
// Loop over spatial axes in forward order to compute the indices in the
// image and column, and whether the index lies in the padding.
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride[d_i] - pad[d_i] +
d_offset[d_i] * dilation[d_i];
is_padding |= d_img < 0 || d_img >= img_shape[d_i + 1];
img_index = img_index * img_shape[d_i + 1] + d_img;
}
Y_data[col_index] = is_padding ? zero_point : X_data[img_index];
// Advance the multi-dimensional output position (odometer-style).
utils::IncreaseIndexInDims(N, col_shape + 1, d_iter.data());
}
}
}
/**
* The layout of the result is N H W G R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
// NHWC im2col with group support (see layout comment above).  Padded
// positions receive zero_point so quantized tensors keep their zero offset.
template <typename T>
static void Im2ColNHWC(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const int groups,
const T& zero_point) {
// Effective (dilated) kernel extents and output spatial size.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
// Parallelize over output rows, unless already inside a parallel region.
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
for (int h = 0; h < height_col; ++h) {
int h_pad = -pad_t + h * stride_h;
T* data_col_temp =
data_col + h * width_col * kernel_h * kernel_w * channels;
int w_pad = -pad_l;
for (int w = 0; w < width_col; ++w) {
// (r, s) index the kernel row/column for input position (ih, iw).
int r = 0;
for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
int s = 0;
for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) {
if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
// Copy one group's channel slice for kernel offset (r, s).
for (int g = 0; g < groups; ++g) {
memcpy(
data_col_temp +
((g * kernel_h + r) * kernel_w + s) * (channels / groups),
data_im + (ih * width + iw) * channels +
g * (channels / groups),
sizeof(T) * (channels / groups));
}
} else {
// This should be simply padded with zero.
for (int g = 0; g < groups; ++g) {
for (int i = 0; i < channels / groups; ++i) {
data_col_temp
[(((g * kernel_h + r) * kernel_w) + s) *
(channels / groups) +
i] = zero_point;
}
}
}
} // for each iw
} // for each ih
data_col_temp += kernel_h * kernel_w * channels;
w_pad += stride_w;
} // for each output pixel
} // for each image row
}
/**
* The layout of the result is N T H W G Q R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
// 3D (temporal + spatial) NHWC im2col with group support (see layout
// comment above).  Padded positions receive zero_point.
template <typename T>
static void Im2Col3DNHWC(
const int channels,
const int num_frames,
const int height,
const int width,
const int kernel_t,
const int kernel_h,
const int kernel_w,
const int dilation_t,
const int dilation_h,
const int dilation_w,
const int pad_p, // previous frame
const int pad_t, // top
const int pad_l, // left
const int pad_n, // next frame
const int pad_b, // bottom
const int pad_r, // right
const int stride_t,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const int groups,
const T& zero_point) {
// Effective (dilated) kernel extents and output volume size.
const int dkernel_t = dilation_t * (kernel_t - 1) + 1;
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int frame_col = (num_frames + pad_p + pad_n - dkernel_t) / stride_t + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
// Parallelize over output frames, unless already inside a parallel region.
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
for (int t = 0; t < frame_col; ++t) {
int t_pad = -pad_p + t * stride_t;
for (int h = 0; h < height_col; ++h) {
int h_pad = -pad_t + h * stride_h;
T* data_col_temp = data_col +
(t * height_col + h) * width_col * kernel_t * kernel_h * kernel_w *
channels;
for (int w = 0; w < width_col; ++w) {
int w_pad = -pad_l + w * stride_w;
// (q, r, s) index the kernel frame/row/column.
int q = 0;
for (int it = t_pad; it < t_pad + dkernel_t; it += dilation_t, ++q) {
int r = 0;
for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
int s = 0;
for (int iw = w_pad; iw < w_pad + dkernel_w;
iw += dilation_w, ++s) {
if (it >= 0 && it < num_frames && ih >= 0 && ih < height &&
iw >= 0 && iw < width) {
// Copy one group's channel slice for kernel offset (q, r, s).
for (int g = 0; g < groups; ++g) {
memcpy(
data_col_temp +
(((g * kernel_t + q) * kernel_h + r) * kernel_w + s) *
(channels / groups),
data_im + ((it * height + ih) * width + iw) * channels +
g * (channels / groups),
sizeof(T) * (channels / groups));
}
} else {
// This should be simply padded with zero.
for (int g = 0; g < groups; ++g) {
for (int i = 0; i < channels / groups; ++i) {
data_col_temp
[((((g * kernel_t + q) * kernel_h + r) * kernel_w) +
s) *
(channels / groups) +
i] = zero_point;
}
}
}
} // for each iw
} // for each ih
} // for each it
data_col_temp += kernel_t * kernel_h * kernel_w * channels;
} // for each output pixel
} // for each image row
} // for each frame
}
} // namespace math
} // namespace caffe2
|
vector.h | #include <iostream>
#include <omp.h>
// Heap-allocated numeric vector with modular (wrap-around) indexing.
// I is the index/length type, T the element type.  The class owns its
// buffer: copies deep-copy, moves transfer ownership.
template <class I, class T>
class Vector {
public:
// Maximum number of elements print() shows before eliding with "...".
I max_print_length = 7;
// Constructors and destructor
// Allocate an uninitialized vector of `length` elements.
Vector(I length) {
this->length = length;
this->data = new T[length];
}
// Copy constructor: deep-copies the other vector's storage.
Vector(const Vector &other) {
this->initialize(other.length, other.data);
}
// Move constructor: steals the other vector's buffer.
Vector(Vector &&other) {
this->length = other.length;
this->data = other.data;
other.data = nullptr;
}
~Vector(void) {
delete [] this->data;
this->data = nullptr;
}
// Operator overloads
// Element access with modular wrapping: any index, including negative
// values, is mapped into [0, length).
T &operator[](I index) {
return this->data[
(index%this->length+this->length)%this->length
];
}
// Copy assignment (self-assignment safe).
Vector<I, T> &operator=(const Vector<I, T> &other) {
if (this != &other)
this->initialize(other.length, other.data);
return *this;
}
// Move assignment: frees own buffer, then takes ownership of other's.
Vector<I, T> &operator=(Vector<I, T> &&other) {
if (this != &other) {
delete [] this->data;
this->length = other.length;
this->data = other.data;
other.data = nullptr;
}
return *this;
}
// Dot product (lvalue and rvalue overloads).
T operator*(Vector<I, T> &other) {
return this->dot(other);
}
T operator*(Vector<I, T> &&other) {
return this->dot(other);
}
// Element-wise addition (lvalue and rvalue overloads).
Vector<I, T> operator+(Vector<I, T> &other) {
return this->plus(other);
}
Vector<I, T> operator+(Vector<I, T> &&other) {
return this->plus(other);
}
// Element-wise subtraction (lvalue and rvalue overloads).
Vector<I, T> operator-(Vector<I, T> &other) {
return this->minus(other);
}
Vector<I, T> operator-(Vector<I, T> &&other) {
return this->minus(other);
}
// Utility functions
I get_length(void) {
return this->length;
}
T *get_data(void) {
return this->data;
}
// Fill every slot with the value produced by `function()`.  Note the
// callable takes no arguments, so this is a generator, not a per-element
// transform.  (`auto` parameters need C++20 or a compiler extension.)
Vector<I, T> &map(auto function) {
for (I ith=0; ith<this->length; ith++)
this->data[ith] = function();
return *this;
}
// Set every element to `value`.
void unify(T value) {
this->map([value] () -> T { return value; });
}
// Print up to max_print_length elements, eliding the rest with "...".
void print(void) {
I length = std::min<I>(this->length, this->max_print_length);
std::cout << "<Vector(" << this->length << ") @ (";
for (I ith=0; ith<length; ith++)
std::cout << this->data[ith] << ", ";
if (length != this->length)
std::cout << "...";
std::cout << ")>" << std::endl;
}
// Binary operators
// Serial dot product; throws if lengths differ.
T dot(Vector<I, T> &other) {
this->has_same_length_with(other);
T sum = 0;
for (I ith=0; ith<this->length; ith++)
sum += this->data[ith] * other[ith];
return sum;
}
// OpenMP-parallel dot product using a sum reduction.
T dot_mp(Vector<I, T> &other) {
this->has_same_length_with(other);
T sum = 0;
#pragma omp parallel for reduction(+:sum)
for (I ith=0; ith<this->length; ith++)
sum += this->data[ith] * other[ith];
return sum;
}
// Element-wise sum into a newly allocated vector.
Vector<I, T> plus(Vector<I, T> &other) {
this->has_same_length_with(other);
Vector<I, T> result(this->length);
for (I ith=0; ith<this->length; ith++)
result[ith] = this->data[ith] + other[ith];
return result;
}
// Element-wise difference into a newly allocated vector.
Vector<I, T> minus(Vector<I, T> &other) {
this->has_same_length_with(other);
Vector<I, T> result(this->length);
for (I ith=0; ith<this->length; ith++)
result[ith] = this->data[ith] - other[ith];
return result;
}
private:
// Private members (not modifiable from outside)
I length;
T *data = nullptr;
// Private helpers (shared implementation details)
// Throws a C-string literal when the two lengths differ.
void has_same_length_with(Vector<I, T> &other) {
if (this->length != other.length)
throw "Length does not match.";
}
// Replace contents with a deep copy of data[0..length).
void initialize(I length, T *data) {
delete [] this->data;
this->length = length;
this->data = new T[length];
for (I ith=0; ith<length; ith++)
this->data[ith] = data[ith];
}
};
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store X - Y in RESULT.  As in the original, *y is normalized in place so
 * the microsecond difference ends up in range.  Returns 1 if the
 * difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= borrow * 1000000;
    }
    /* Carry the other way when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += carry * 1000000;
    }
    /* After normalization the microsecond part is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-1 3D 7-point stencil.
 * Usage: ./3d7pt Nx Ny Nz [Nt]
 * Allocates two Nz x Ny x Nx grids (double-buffered over time), fills them
 * with random data, sweeps the stencil Nt-1 times per test and reports the
 * fastest run via PRINT_RESULTS.
 *
 * Fixes vs. original: Nx/Ny/Nz/Nt get defaults instead of being read
 * uninitialized when arguments are missing; both time buffers (including
 * the boundary layers the stencil reads) are initialized; the undeclared
 * lowercase `min` is replaced by the MIN macro defined above.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults (+2 accounts for the halo layer on each axis). */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[buf][z][y][x]: two time buffers of row pointers. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  /* Initialize the full grid in BOTH buffers: the stencil reads the
   * boundary (index 0 and N-1) of the current buffer, which the sweep
   * never writes, so every cell must start defined. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fix: the macro defined in this file is MIN; lowercase `min` was
     * undeclared and would not compile. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
  for(j=0;j<Ny;j++){
  free(A[0][i][j]);
  free(A[1][i][j]);
  }
  free(A[0][i]);
  free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
9815.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Seed float_n with its fixed value and populate data[i][j] with the
 * deterministic pattern (i*j)/M.  Iterates over the macro bounds M and N,
 * matching the original benchmark. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  *float_n = 1.2;
  for (int row = 0; row < M; row++) {
    for (int col = 0; col < N; col++) {
      data[row][col] = ((DATA_TYPE) row*col) / M;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the m x m matrix symmat to stderr (DCE guard / output check),
 * inserting a newline every 20 printed values. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < m; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
      if ((i * m + j) % 20 == 0) {
        fprintf (stderr, "\n");
      }
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Covariance kernel: column means -> centering -> upper-triangular
 * accumulation mirrored into the symmetric result symmat. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
/* NOTE(review): a bare "#pragma omp" names no directive and is ignored
 * (or diagnosed) by compilers -- presumably a "parallel for"-style
 * directive was intended here and at the two occurrences below;
 * confirm against the original benchmark source. */
#pragma omp
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp
for (i = 0; i < _PB_N; i++)
{
/* NOTE(review): "schedule" is not a valid clause on
 * "target teams distribute" (the distribute clause is
 * dist_schedule) -- verify this compiles as intended. */
#pragma omp target teams distribute thread_limit(128) schedule(static, 16)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
#pragma omp
for (j1 = 0; j1 < _PB_M; j1++)
{
#pragma omp target teams distribute thread_limit(128) schedule(static, 16)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
/* Covariance is symmetric: mirror the upper triangle. */
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time kernel_covariance, and
 * print symmat to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
pmv-OpenMP-b_atcgrid.c | #include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_set_dynamic(0);
#define omp_set_num_threads(12);
#endif
/* Matrix-vector product v2 = M * v1 with OpenMP, timing only the compute
 * phase.  Usage: ./pmv N  (N = rows = columns of the square matrix).
 * NOTE(review): time_t/time() and srand/rand need <time.h>, which is not
 * included in this file -- confirm it arrives transitively.
 * NOTE(review): rand() is called inside parallel regions; it is not
 * required to be thread-safe, so initialization values may race. */
int main(int argc, char ** argv){
int **M;
int *v1, *v2;
int i, k, N;
double cgt1, cgt2, ncgt; // execution-time bookkeeping
time_t t;
// Seed rand()
srand((unsigned) time(&t));
// Read the size (rows x columns) of the square matrix
if(argc < 2){
fprintf(stderr,"Falta iteraciones\n");
exit(-1);
}
N = atoi(argv[1]);
// == Memory allocation
// ====================================================>
v1 = (int *) malloc(N*sizeof(int));
v2 = (int *) malloc(N*sizeof(int));
if ( v1 == NULL || v2 == NULL ){
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
M = (int**) malloc (N*sizeof(int*));
// With i private in a parallel for, each thread gets its own copy of i and
// its own chunk of iterations, e.g. i = 0, i = 3, i = 6 for a loop of N = 9.
// NOTE(review): exit() inside a parallel region (here and in the loops
// below) is non-conforming OpenMP -- consider an error flag instead.
#pragma omp parallel for shared(M,N) private(i) default(none)
for(i = 0; i<N; i++){
M[i] = (int*) malloc (N*sizeof(int));
if( M[i] == NULL ){
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
}
// == Initialization
// ====================================================>
// M, v1, v2, N, i shared; each thread handles a slice of the loop over i;
// k is private so every thread filling row i has its own k counter, while
// the outer loop over i stays sequential.
for(i = 0; i<N; i++){
#pragma omp parallel for shared(M,i,N) private(k) default(none)
for(k = 0; k<N; k++)
M[i][k] = rand() % 8;
}
#pragma omp parallel for shared(v1,v2,N) private(i) default(none)
for(i = 0; i<N; i++){
v1[i] = rand() % 6;
v2[i] = 0;
}
// == Computation
// ====================================================>
cgt1 = omp_get_wtime();
// The result vector v2 stays shared so all threads can reach it without a
// local copy, but updates to it must be atomic (no interleaving).
for(i = 0; i<N; i++){
#pragma omp parallel shared(M,i,N,v2,v1) private(k) default(none)
{
int sumalocal = 0;
#pragma omp for
for(k = 0; k<N; k++)
sumalocal += M[i][k] * v1[k];
// Each thread folds its partial sum into v2[i] atomically.
#pragma omp atomic
v2[i] += sumalocal;
}
}
cgt2 = omp_get_wtime();
ncgt = (double)(cgt2 - cgt1);
// == Output
// ====================================================>
// NOTE(review): %u is used with signed int N below -- harmless for
// non-negative N but strictly a format mismatch; %d would be correct.
printf("Tiempo(seg.):%11.9f\n", ncgt);
printf("Tamaño de los vectores: %u\n", N);
printf("\tv1 = %uElem -> %lu bytes\n\tv2 = %uElem -> %lu bytes\n", N, N*sizeof(int), N, N*sizeof(int));
printf("Tamaño de la matriz: %ux%u -> %lu bytes\n", N, N, N*N*sizeof(int));
// Printing the first and last components of the result prevents compiler
// optimizations from eliminating the summation code.
printf("v2[0] = %u ... v2[N-1] = %u \n", v2[0], v2[N-1]);
// For small sizes (N < 15) show the computed values
if(N < 15){
printf("\n----------- Matriz M ----------- \n");
for(i = 0; i<N; i++){
for(k = 0; k<N; k++)
printf("%u\t", M[i][k]);
printf("\n");
}
printf("\n----------- Vector V1 ----------- \n");
for(i = 0; i<N; i++)
printf("%u\t", v1[i]);
printf("\n");
printf("\n----------- Vector V2----------- \n");
for(i = 0; i<N; i++)
printf("%u\t", v2[i]);
printf("\n");
}
// == Free memory
// ====================================================>
free(v1);
free(v2);
#pragma omp parallel for shared(M,N) private(i) default(none)
for(i = 0; i<N; i++)
free(M[i]);
free(M);
} |
ex1.c | /*
Kompilacja i przykładowe uruchomienie (ilość wątków przekazuję przez env):
gcc -Wall ex1.c -o ex1 -fopenmp -lm
env OMP_NUM_THREADS=4 ./ex1 1000000 10000
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <math.h>
double startTimer, endTimer, startTotalTimer, endTotalTimer;
// A bucket: holds its current element count and a heap-allocated value
// array (sized in bucketSort to the worst case of n elements).
struct bucket
{
int count;
int *value;
};
// Funkcja porównywawcza dla quicksorta
/* qsort comparator: orders ints ascending; returns -1, 0 or 1. */
int compareIntegers(const void *first, const void *second)
{
    int lhs = *((const int *)first);
    int rhs = *((const int *)second);
    /* (lhs > rhs) - (lhs < rhs) yields exactly 1, -1 or 0. */
    return (lhs > rhs) - (lhs < rhs);
}
/* Distribute array values into buckets in parallel.  Every thread scans the
 * whole array (starting at its own offset and wrapping with i % n) but only
 * stores values whose destination bucket falls in the thread's contiguous
 * bucket slice -- so no two threads ever write the same bucket and no
 * locking is needed. */
void splitToBuckets(int array[], int n, int maxRange, struct bucket buckets[], int bucketsSize)
{
#pragma omp parallel
{
int startIndex = (double)omp_get_thread_num() / omp_get_max_threads() * n;
int i, indexCounter;
for (i = startIndex, indexCounter = 0; indexCounter < n; i++, indexCounter++)
{
// Assign the value to a bucket proportionally to the value range and
// bucket count.  NOTE(review): assumes values are in [0, maxRange);
// a value equal to maxRange would index one past the last bucket.
int bucketIndex = (double)array[i % n] / maxRange * bucketsSize;
int startBucketIndex = ceil((double)omp_get_thread_num() / omp_get_max_threads() * bucketsSize);
int endBucketIndex = ceil((double)(omp_get_thread_num() + 1) / omp_get_max_threads() * bucketsSize - 1);
// The thread only considers values belonging to its own bucket range
if (bucketIndex >= startBucketIndex && bucketIndex <= endBucketIndex)
{
buckets[bucketIndex].value[buckets[bucketIndex].count++] = array[i % n];
}
}
}
}
/* qsort each bucket; every thread handles the same contiguous bucket slice
 * that splitToBuckets/mergeBuckets assign it.  `array` is unused (kept for
 * signature compatibility). */
void sortBuckets(int array[], struct bucket buckets[], int bucketsSize)
{
#pragma omp parallel
    {
        int first = ceil((double)omp_get_thread_num() / omp_get_max_threads() * bucketsSize);
        int last = ceil((double)(omp_get_thread_num() + 1) / omp_get_max_threads() * bucketsSize - 1);
        for (int b = first; b <= last; b++)
        {
            qsort(buckets[b].value, buckets[b].count, sizeof(int), &compareIntegers);
        }
    }
}
/* Copy sorted bucket contents back into `array` in parallel.  Each thread
 * owns the same contiguous bucket slice as in sortBuckets and writes at an
 * offset equal to the total count of all preceding buckets, so threads
 * fill disjoint regions of the output. */
void mergeBuckets(int array[], struct bucket buckets[], int bucketsSize)
{
#pragma omp parallel
{
int i, j, k;
// Compute this thread's starting position in the output array.
int offset = 0;
int startIndex = ceil((double)omp_get_thread_num() / omp_get_max_threads() * bucketsSize);
int endIndex = ceil((double)(omp_get_thread_num() + 1) / omp_get_max_threads() * bucketsSize - 1);
for (i = 0; i < startIndex; i++)
{
offset += buckets[i].count;
}
// Fill from that start position, walking this thread's own buckets.
for (k = offset, i = startIndex; i <= endIndex; i++)
{
for (j = 0; j < buckets[i].count; j++)
{
array[k + j] = buckets[i].value[j];
}
k += buckets[i].count;
}
}
}
/* Bucket sort driver: allocates bucketsSize buckets (each sized for the
 * worst case of n elements), then splits, sorts and merges, timing each
 * phase with the global timer variables. */
void bucketSort(int array[], int n, int maxRange, int bucketsSize)
{
int i;
// Initialize the buckets and their backing arrays.
struct bucket *buckets = (struct bucket *)malloc(sizeof(struct bucket) * bucketsSize);
for (i = 0; i < bucketsSize; i++)
{
buckets[i].count = 0;
buckets[i].value = (int *)malloc(sizeof(int) * n);
}
startTimer = omp_get_wtime();
splitToBuckets(array, n, maxRange, buckets, bucketsSize);
endTimer = omp_get_wtime();
printf("Splitting into buckets took: %lf\n", endTimer - startTimer);
startTimer = omp_get_wtime();
// Sort each bucket individually (qsort under the hood).
sortBuckets(array, buckets, bucketsSize);
endTimer = omp_get_wtime();
printf("Sorting the buckets took: %lf\n", endTimer - startTimer);
startTimer = omp_get_wtime();
mergeBuckets(array, buckets, bucketsSize);
endTimer = omp_get_wtime();
printf("Merging into initial array took: %lf\n", endTimer - startTimer);
// Cleanup
for (i = 0; i < bucketsSize; i++)
{
free(buckets[i].value);
}
free(buckets);
}
/* Verify that `array` is non-decreasing; report the verdict on stdout. */
void sortCheck(int array[], int n)
{
    int sorted = 1;
    for (int i = 0; i + 1 < n; i++)
    {
        if (array[i] > array[i + 1])
        {
            sorted = 0;
            break;
        }
    }
    printf(sorted ? "Array is sorted\n" : "Array is not sorted\n");
}
/* Fill `array` with n pseudo-random ints in [0, maxRange) in parallel.
 * Each thread keeps a private erand48 state (xi) seeded from time ^ thread
 * id, so threads draw independent streams without shared PRNG state. */
void populateArray(int array[], int n, int maxRange)
{
int i;
unsigned short xi[3];
#pragma omp parallel private(xi)
{
// Per-thread seed; xi is private, so each thread owns its PRNG state.
unsigned threadSeed = (unsigned)(time(NULL)) ^ omp_get_thread_num();
xi[0] = threadSeed;
xi[1] = threadSeed;
xi[2] = threadSeed;
#pragma omp for
// Fill the array with random data in the range given by the program arguments
for (i = 0; i < n; i++)
{
array[i] = (int)(erand48(xi) * maxRange);
}
}
}
/* Entry point: parses n (element count), maxRange (value range) and
 * bucketsSize from the command line, fills an array with random data,
 * bucket-sorts it and verifies the result.
 * Usage: env OMP_NUM_THREADS=t ./ex1 n maxRange bucketsSize
 * Fix vs. original: argv[1..3] were read without checking argc, which is
 * undefined behavior when arguments are missing. */
int main(int argc, char **argv)
{
    printf("\nN of threads: %d\n", omp_get_max_threads());
    if (argc < 4)
    {
        printf("Usage: %s n maxRange bucketsSize\n", argv[0]);
        return -1;
    }
    int n = atoi(argv[1]);
    int maxRange = atoi(argv[2]);
    int bucketsSize = atoi(argv[3]);
    /* The per-thread bucket slicing requires at least one bucket per thread. */
    if (bucketsSize < omp_get_max_threads())
    {
        printf("Please provide more buckets than threads\n");
        return -1;
    }
    int *array = (int *)malloc(n * sizeof(int));
    startTimer = omp_get_wtime();
    startTotalTimer = omp_get_wtime();
    populateArray(array, n, maxRange);
    endTimer = omp_get_wtime();
    printf("Populating the array took: %lf\n", endTimer - startTimer);
    bucketSort(array, n, maxRange, bucketsSize);
    endTotalTimer = omp_get_wtime();
    printf("The whole algorithm took: %lf\n", endTotalTimer - startTotalTimer);
    /* Check that the array ended up sorted. */
    sortCheck(array, n);
    free(array);
    return 0;
} |
hessian_screen.c | /* Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
int int2e_cart();
int int2e_ipvip1_cart();
int int2e_spsp1spsp2_cart();
int int2e_spsp1spsp2_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
* Gradients screening for grad/rhf.py
*/
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
/* Schwarz-type prescreen for gradient J/K contractions (grad/rhf.py).
 * Returns nonzero when the shell quartet (i,j|k,l) can contribute above
 * direct_scf_cutoff given the density-matrix bounds in dm_cond.  The bra
 * bound comes from the derivative block of q_cond, the ket bound from the
 * regular-ERI block stored right after it. */
int CVHFgrad_jk_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        const int ish = shls[0];
        const int jsh = shls[1];
        const int ksh = shls[2];
        const int lsh = shls[3];
        const int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < n);
        assert(jsh < n);
        assert(ksh < n);
        assert(lsh < n);
        /* Second n*n block of q_cond holds the regular ERI bounds. */
        double *q_cond_kl = opt->q_cond + n * n;
        double qijkl = opt->q_cond[ish*n+jsh] * q_cond_kl[ksh*n+lsh];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        /* Smallest density element that can still push this quartet
         * above the cutoff. */
        double dmin = opt->direct_scf_cutoff / qijkl;
        return (2*opt->dm_cond[lsh*n+ksh] > dmin)
            || (  opt->dm_cond[jsh*n+ksh] > dmin)
            || (  opt->dm_cond[jsh*n+lsh] > dmin);
}
/* Build the two-part q_cond screening table used by the gradient
 * prescreen: the first nbas*nbas entries hold sqrt(max |derivative
 * integral|) per shell pair computed from `intor` below; the second
 * nbas*nbas entries hold regular int2e Schwarz bounds filled by
 * CVHFset_int2e_q_cond. */
void CVHFgrad_jk_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
// Pick spherical vs cartesian int2e based on the AO count in ao_loc.
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int ij, i, j, iijj, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
// di ends up as the largest shell dimension: used to size `buf`.
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
// buf holds a 9-component integral block; only the blocks at offsets
// 0, 4 and 8 (bufx/bufy/bufz -- presumably the xx/yy/zz diagonal
// components of the 9-component derivative integral) are inspected.
double *buf = malloc(sizeof(double) * 9 * di*di*di*di);
double *bufx = buf;
double *bufy, *bufz;
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*nbas; ij++) {
ish = ij / nbas;
jsh = ij - ish * nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
// Diagonal quartet (ij|ij) gives the Schwarz bound for pair (i,j).
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
// Tiny floor keeps the bound strictly positive (it is later used as
// a divisor in the prescreen functions).
qtmp = 1e-100;
bufy = buf + 4*(di*dj*di*dj);
bufz = buf + 8*(di*dj*di*dj);
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
// Diagonal element (i,j,i,j) of the 4-index block.
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufx[iijj]));
qtmp = MAX(qtmp, fabs(bufy[iijj]));
qtmp = MAX(qtmp, fabs(bufz[iijj]));
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
/* Build dm_cond: for every shell pair (ish, jsh), the maximum |dm| over
 * the corresponding AO block, maximized across all nset density
 * matrices.  Replaces any previously allocated table. */
void CVHFgrad_jk_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                               int *atm, int natm, int *bas, int nbas, double *env)
{
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        nbas = opt->nbas;
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
        const size_t nao = ao_loc[nbas];
        for (int ish = 0; ish < nbas; ish++) {
        for (int jsh = 0; jsh < nbas; jsh++) {
                double dmax = 0;
                for (int iset = 0; iset < nset; iset++) {
                        /* iset-th density matrix, nao x nao. */
                        double *pdm = dm + nao*nao*iset;
                        for (int i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (int j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                dmax = MAX(dmax, fabs(pdm[i*nao+j]));
                        } }
                }
                opt->dm_cond[ish*nbas+jsh] = dmax;
        } }
}
/*
* Hessian screening for hessian/rhf.py
*/
// ijkl,ji->kl
// ijkl,li->kj
// ijkl,lj->ki
/*
 * Schwarz/density prescreening for the ip1ip2 contractions used by
 * hessian/rhf.py:
 *     ijkl,ji->kl    ijkl,li->kj    ijkl,lj->ki
 * Returns nonzero when the shell quartet (i,j|k,l) may contribute.
 */
int CVHFip1ip2_prescreen(int *shls, CVHFOpt *opt,
                         int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; /* screening disabled */
        }
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        const int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        const double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        /* keep the quartet if any contracted density block is large enough */
        const double dmin = opt->direct_scf_cutoff / qijkl;
        return opt->dm_cond[j*n+i] > dmin
            || opt->dm_cond[l*n+i] > dmin
            || opt->dm_cond[l*n+j] > dmin;
}
/*
 * Build the q_cond screening table for ip1ip2 integrals.  The gradient
 * J/K driver already produces the required table, so delegate to it.
 */
void CVHFip1ip2_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                           int *ao_loc, int *atm, int natm,
                           int *bas, int nbas, double *env)
{
        CVHFgrad_jk_direct_scf(opt, intor, cintopt, ao_loc,
                               atm, natm, bas, nbas, env);
}
/*
 * Build the dm_cond screening table for ip1ip2 integrals; identical to
 * the gradient J/K density screening, so delegate to it.
 */
void CVHFip1ip2_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                              int *atm, int natm, int *bas, int nbas, double *env)
{
        CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc,
                                  atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
/*
 * Schwarz/density prescreening for the ipip1 contractions:
 *     ijkl,lk->ij    ijkl,jk->il    ijkl,kl->ij    ijkl,jl->ik
 * Bra uses the derivative-integral bound (first nbas^2 block of
 * q_cond), ket uses the regular ERI bound stored right after it.
 */
int CVHFipip1_prescreen(int *shls, CVHFOpt *opt,
                        int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; /* screening disabled */
        }
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        const int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        const double *q_cond_ij = opt->q_cond;         /* derivative block */
        const double *q_cond_kl = opt->q_cond + n * n; /* regular ERI block */
        const double qijkl = q_cond_ij[i*n+j] * q_cond_kl[k*n+l];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        const double dmin = opt->direct_scf_cutoff / qijkl;
        /* the lk density enters two contractions, hence the factor 2 */
        return 2*opt->dm_cond[l*n+k] > dmin
            ||   opt->dm_cond[j*n+k] > dmin
            ||   opt->dm_cond[j*n+l] > dmin;
}
/*
 * Build the two-part Schwarz screening table opt->q_cond for ipip1-type
 * (second-derivative) integrals.  Layout (2 * nbas * nbas doubles):
 *   [0, nbas*nbas)           sqrt(max |diagonal quartet (ij|ij)|) of the
 *                            derivative integrals produced by `intor`
 *   [nbas*nbas, 2*nbas*nbas) the standard ERI q_cond, filled by
 *                            CVHFset_int2e_q_cond
 */
void CVHFipip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
// Choose spherical vs cartesian regular ERIs by comparing the AO count
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int ij, i, j, iijj, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
// di becomes the largest shell dimension, used to size the work buffer
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
// 256 component blocks; the offsets below imply the components are laid
// out with a stride of 16 blocks (only xx..zz are inspected) --
// NOTE(review): confirm against the component layout of this intor
double *buf = malloc(sizeof(double) * 256 * di*di*di*di);
double *bufxx = buf;
double *bufxy, *bufxz, *bufyx, *bufyy, *bufyz, *bufzx, *bufzy, *bufzz;
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*nbas; ij++) {
ish = ij / nbas;
jsh = ij - ish * nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
// diagonal quartet (ij|ij) gives the Schwarz bound for pair (ish,jsh)
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
// here iijj is the size of one component block, di*dj*di*dj
iijj = di * dj * di * dj;
bufxy = buf + ( 1*16+ 1)*iijj;
bufxz = buf + ( 2*16+ 2)*iijj;
bufyx = buf + ( 4*16+ 4)*iijj;
bufyy = buf + ( 5*16+ 5)*iijj;
bufyz = buf + ( 6*16+ 6)*iijj;
bufzx = buf + ( 8*16+ 8)*iijj;
bufzy = buf + ( 9*16+ 9)*iijj;
bufzz = buf + (10*16+10)*iijj;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
// scan the (i,j,i,j) diagonal elements of every component block
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
// iijj is reused: now the flat index of element (i,j,i,j)
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufxx[iijj]));
qtmp = MAX(qtmp, fabs(bufxy[iijj]));
qtmp = MAX(qtmp, fabs(bufxz[iijj]));
qtmp = MAX(qtmp, fabs(bufyx[iijj]));
qtmp = MAX(qtmp, fabs(bufyy[iijj]));
qtmp = MAX(qtmp, fabs(bufyz[iijj]));
qtmp = MAX(qtmp, fabs(bufzx[iijj]));
qtmp = MAX(qtmp, fabs(bufzy[iijj]));
qtmp = MAX(qtmp, fabs(bufzz[iijj]));
} }
// sqrt so that q_cond[ij]*q_cond[kl] bounds the quartet magnitude
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
/*
 * Density screening table for ipip1 integrals; identical to the
 * gradient J/K density screening, so delegate to it.
 */
void CVHFipip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                             int *atm, int natm, int *bas, int nbas, double *env)
{
        CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc,
                                  atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,li->kj
// ijkl,kl->ij
// ijkl,ki->lj
/*
 * Schwarz/density prescreening for the ipvip1 contractions:
 *     ijkl,lk->ij    ijkl,li->kj    ijkl,kl->ij    ijkl,ki->lj
 * Bra uses the derivative-integral bound (first nbas^2 block of
 * q_cond), ket uses the regular ERI bound stored right after it.
 */
int CVHFipvip1_prescreen(int *shls, CVHFOpt *opt,
                         int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; /* screening disabled */
        }
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        const int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        const double *q_cond_ij = opt->q_cond;         /* derivative block */
        const double *q_cond_kl = opt->q_cond + n * n; /* regular ERI block */
        const double qijkl = q_cond_ij[i*n+j] * q_cond_kl[k*n+l];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        const double dmin = opt->direct_scf_cutoff / qijkl;
        /* the lk density enters two contractions, hence the factor 2 */
        return 2*opt->dm_cond[l*n+k] > dmin
            ||   opt->dm_cond[l*n+i] > dmin
            ||   opt->dm_cond[k*n+i] > dmin;
}
/*
 * q_cond table for ipvip1 integrals: the ipip1 builder produces the
 * same derivative + regular two-block layout, so reuse it.
 */
void CVHFipvip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                           int *ao_loc, int *atm, int natm,
                           int *bas, int nbas, double *env)
{
        CVHFipip1_direct_scf(opt, intor, cintopt, ao_loc,
                             atm, natm, bas, nbas, env);
}
/*
 * Density screening table for ipvip1 integrals; identical to the
 * gradient J/K density screening, so delegate to it.
 */
void CVHFipvip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                              int *atm, int natm, int *bas, int nbas, double *env)
{
        CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc,
                                  atm, natm, bas, nbas, env);
}
|
multiple_variables_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Copy a 4-element array in parallel, one element per OpenMP thread,
 * then print the result.
 *
 * Fixes vs the original:
 *  - b[rank] = a[rank] indexed the heap blocks directly with the thread
 *    id, which reads/writes out of bounds whenever the runtime launches
 *    more than 4 threads; the index is now bounds-checked.
 *  - b was malloc'd and then printed, so with fewer than 4 threads some
 *    printed elements were uninitialized; calloc gives a defined output.
 *  - allocation failures are now checked before use.
 */
int main()
{
    enum { N = 4 };
    int *a = (int*)malloc(sizeof(int) * N);
    int *b = (int*)calloc(N, sizeof(int)); /* zero-filled: defined even if < N threads run */
    if (!a || !b) {
        free(a);
        free(b);
        return 1;
    }
    for (int i = 0; i < N; i++) {
        a[i] = i;
    }
    #pragma omp parallel
    {
        int rank = omp_get_thread_num();
        if (rank < N) {          /* only the first N threads own an element */
            b[rank] = a[rank];
        }
    }
    printf("[%d,%d,%d,%d]\n", b[0], b[1], b[2], b[3]);
    free(a);
    free(b);
    return 0;
}
|
gmm.c | /** @file gmm.c
** @brief Gaussian Mixture Models - Implementation
** @author David Novotny
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm Gaussian Mixture Models (GMM)
@author David Novotny
@author Andrea Vedaldi
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref gmm.h is an implementation of *Gaussian Mixture Models* (GMMs).
The main functionality provided by this module is learning GMMs from
data by maximum likelihood. Model optimization uses the Expectation
Maximization (EM) algorithm @cite{dempster77maximum}. The
implementation supports @c float or @c double data types, is
parallelized, and is tuned to work reliably and effectively on
datasets of visual features. Stability is obtained in part by
regularizing and restricting the parameters of the GMM.
@ref gmm-starting demonstrates how to use the C API to compute the FV
representation of an image. For further details refer to:
- @subpage gmm-fundamentals
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
In order to use @ref gmm.h to learn a GMM from training data, create a
new ::VlGMM object instance, set the parameters as desired, and run
the training code. The following example learns @c numClusters
Gaussian components from @c numData vectors of dimension @c dimension
and storage class @c float using at most 100 EM iterations:
@code
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;
double loglikelihood ;
// create a new instance of a GMM object for float data
gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
// set the maximum number of EM iterations to 100
vl_gmm_set_max_num_iterations (gmm, 100) ;
// set the initialization to random selection
vl_gmm_set_initialization (gmm,VlGMMRand);
// cluster the data, i.e. learn the GMM
vl_gmm_cluster (gmm, data, numData);
// get the means, covariances, and priors of the GMM
means = vl_gmm_get_means(gmm);
covariances = vl_gmm_get_covariances(gmm);
priors = vl_gmm_get_priors(gmm);
// get loglikelihood of the estimated GMM
loglikelihood = vl_gmm_get_loglikelihood(gmm) ;
// get the soft assignments of the data points to each cluster
posteriors = vl_gmm_get_posteriors(gmm) ;
@endcode
@note ::VlGMM assumes that the covariance matrices of the GMM are
diagonal. This reduces significantly the number of parameters to learn
and is usually an acceptable compromise in vision applications. If the
data is significantly correlated, it can be beneficial to de-correlate
it by PCA rotation or projection in pre-processing.
::vl_gmm_get_loglikelihood is used to get the final loglikelihood of
the estimated mixture, ::vl_gmm_get_means and ::vl_gmm_get_covariances
to obtain the means and the diagonals of the covariance matrices of
the estimated Gaussian modes, and ::vl_gmm_get_posteriors to get the
posterior probabilities that a given point is associated to each of
the modes (soft assignments).
The learning algorithm, which uses EM, finds a local optimum of the
objective function. Therefore the initialization is crucial in
obtaining a good model, measured in term of the final
loglikelihood. ::VlGMM supports a few methods (use
::vl_gmm_set_initialization to choose one) as follows:
Method | ::VlGMMInitialization enumeration | Description
----------------------|-----------------------------------------|-----------------------------------------------
Random initialization | ::VlGMMRand | Random initialization of the mixture parameters
KMeans | ::VlGMMKMeans | Initialization of the mixture parameters using ::VlKMeans
Custom | ::VlGMMCustom | User specified initialization
Note that in the case of ::VlGMMKMeans initialization, an object of
type ::VlKMeans object must be created and passed to the ::VlGMM
instance (see @ref kmeans to see how to correctly set up this object).
When a user wants to use the ::VlGMMCustom method, the initial means,
covariances and priors have to be specified using the
::vl_gmm_set_means, ::vl_gmm_set_covariances and ::vl_gmm_set_priors
methods.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm-fundamentals GMM fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
A *Gaussian Mixture Model* (GMM) is a mixture of $K$ multivariate
Gaussian distributions. In order to sample from a GMM, one samples
first the component index $k \in \{1,\dots,K\}$ with *prior
probability* $\pi_k$, and then samples the vector $\bx \in
\mathbb{R}^d$ from the $k$-th Gaussian distribution
$p(\bx|\mu_k,\Sigma_k)$. Here $\mu_k$ and $\Sigma_k$ are respectively
the *mean* and *covariance* of the distribution. The GMM is completely
specified by the parameters $\Theta=\{\pi_k,\mu_k,\Sigma_k; k =
1,\dots,K\}$
The density $p(\bx|\Theta)$ induced on the training data is obtained
by marginalizing the component selector $k$, obtaining
\[
p(\bx|\Theta)
= \sum_{k=1}^{K} \pi_k p( \bx_i |\mu_k,\Sigma_k),
\qquad
p( \bx |\mu_k,\Sigma_k)
=
\frac{1}{\sqrt{(2\pi)^d\det\Sigma_k}}
\exp\left[
-\frac{1}{2} (\bx-\mu_k)^\top\Sigma_k^{-1}(\bx-\mu_k)
\right].
\]
Learning a GMM to fit a dataset $X=(\bx_1, \dots, \bx_n)$ is usually
done by maximizing the log-likelihood of the data:
@f[
\ell(\Theta;X)
= E_{\bx\sim\hat p} [ \log p(\bx|\Theta) ]
= \frac{1}{n}\sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi_k p(\bx_i|\mu_k, \Sigma_k)
@f]
where $\hat p$ is the empirical distribution of the data. An algorithm
to solve this problem is introduced next.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-em Learning a GMM by expectation maximization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The direct maximization of the log-likelihood function of a GMM is
difficult due to the fact that the assignments of points to Gaussian
mode is not observable and, as such, must be treated as a latent
variable.
Usually, GMMs are learned by using the *Expectation Maximization* (EM)
algorithm @cite{dempster77maximum}. Consider in general the problem of
estimating to the maximum likelihood a distribution $p(x|\Theta) =
\int p(x,h|\Theta)\,dh$, where $x$ is a measurement, $h$ is a *latent
variable*, and $\Theta$ are the model parameters. By introducing an
auxiliary distribution $q(h|x)$ on the latent variable, one can use
Jensen inequality to obtain the following lower bound on the
log-likelihood:
@f{align*}
\ell(\Theta;X) =
E_{x\sim\hat p} \log p(x|\Theta)
&= E_{x\sim\hat p} \log \int p(x,h|\Theta) \,dh \\
&= E_{x\sim\hat p} \log \int \frac{p(x,h|\Theta)}{q(h|x)} q(h|x)\,dh \\
&\geq E_{x\sim\hat p} \int q(h) \log \frac{p(x,h|\Theta)}{q(h|x)}\,dh \\
&= E_{(x,q) \sim q(h|x) \hat p(x)} \log p(x,h|\Theta) -
E_{(x,q) \sim q(h|x) \hat p(x)} \log q(h|x)
@f}
The first term of the last expression is the log-likelihood of the
model where both the $x$ and $h$ are observed and jointly distributed
as $q(x|h)\hat p(x)$; the second term is the average entropy of the
latent variable, which does not depend on $\Theta$. This lower bound
is maximized and becomes tight by setting $q(h|x) = p(h|x,\Theta)$ to
be the posterior distribution on the latent variable $h$ (given the
current estimate of the parameters $\Theta$). In fact:
\[
E_{x \sim \hat p} \log p(x|\Theta)
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)}\left[ \log \frac{p(x,h|\Theta)}{p(h|x,\Theta)} \right]
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)} [ \log p(x|\Theta) ]
=
\ell(\Theta;X).
\]
EM alternates between updating the latent variable auxiliary
distribution $q(h|x) = p(h|x,\Theta_t)$ (*expectation step*) given the
current estimate of the parameters $\Theta_t$, and then updating the
model parameters $\Theta_{t+1}$ by maximizing the log-likelihood lower
bound derived (*maximization step*). The simplification is that in the
maximization step both $x$ and $h$ are now ``observed'' quantities.
This procedure converges to a local optimum of the model
log-likelihood.
@subsection gmm-expectation-step Expectation step
In the case of a GMM, the latent variables are the point-to-cluster
assignments $k_i, i=1,\dots,n$, one for each of $n$ data points. The
auxiliary distribution $q(k_i|\bx_i) = q_{ik}$ is a matrix with $n
\times K$ entries. Each row $q_{i,:}$ can be thought of as a vector of
soft assignments of the data points $\bx_i$ to each of the Gaussian
modes. Setting $q_{ik} = p(k_i | \bx_i, \Theta)$ yields
\[
q_{ik} =
\frac
{\pi_k p(\bx_i|\mu_k,\Sigma_k)}
{\sum_{l=1}^K \pi_l p(\bx_i|\mu_l,\Sigma_l)}
\]
where the Gaussian density $p(\bx_i|\mu_k,\Sigma_k)$ was given above.
One important point to keep in mind when these probabilities are
computed is the fact that the Gaussian densities may attain very low
values and underflow in a vanilla implementation. Furthermore, VLFeat
GMM implementation restricts the covariance matrices to be
diagonal. In this case, the computation of the determinant of
$\Sigma_k$ reduces to computing the trace of the matrix and the
inversion of $\Sigma_k$ could be obtained by inverting the elements on
the diagonal of the covariance matrix.
@subsection gmm-maximization-step Maximization step
The M step estimates the parameters of the Gaussian mixture components
and the prior probabilities $\pi_k$ given the auxiliary distribution
on the point-to-cluster assignments computed in the E step. Since all
the variables are now ``observed'', the estimate is quite simple. For
example, the mean $\mu_k$ of a Gaussian mode is obtained as the mean
of the data points assigned to it (accounting for the strength of the
soft assignments). The other quantities are obtained in a similar
manner, yielding to:
@f{align*}
\mu_k &= { { \sum_{i=1}^n q_{ik} \bx_{i} } \over { \sum_{i=1}^n q_{ik} } },
\\
\Sigma_k &= { { \sum_{i=1}^n { q_{ik} (\bx_{i} - \mu_{k}) {(\bx_{i} - \mu_{k})}^T } } \over { \sum_{i=1}^n q_{ik} } },
\\
\pi_k &= { \sum_{i=1}^n { q_{ik} } \over { \sum_{i=1}^n \sum_{l=1}^K q_{il} } }.
@f}
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-fundamentals-init Initialization algorithms
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The EM algorithm is a local optimization method. As such, the quality
of the solution strongly depends on the quality of the initial values
of the parameters (i.e. of the locations and shapes of the Gaussian
modes).
@ref gmm.h supports the following cluster initialization algorithms:
- <b>Random data points.</b> (::vl_gmm_init_with_rand_data) This method
sets the means of the modes by sampling at random a corresponding
number of data points, sets the covariance matrices of all the modes
are to the covariance of the entire dataset, and sets the prior
probabilities of the Gaussian modes to be uniform. This
initialization method is the fastest, simplest, as well as the one
most likely to end in a bad local minimum.
- <b>KMeans initialization</b> (::vl_gmm_init_with_kmeans) This
method uses KMeans to pre-cluster the points. It then sets the means
and covariances of the Gaussian distributions the sample means and
covariances of each KMeans cluster. It also sets the prior
probabilities to be proportional to the mass of each cluster. In
order to use this initialization method, a user can specify an
instance of ::VlKMeans by using the function
::vl_gmm_set_kmeans_init_object, or let ::VlGMM create one
automatically.
Alternatively, one can manually specify a starting point
(::vl_gmm_set_priors, ::vl_gmm_set_means, ::vl_gmm_set_covariances).
**/
#include "gmm.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef VL_DISABLE_SSE2
#include "mathop_sse2.h"
#endif
#ifndef VL_DISABLE_AVX
#include "mathop_avx.h"
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
#define VL_GMM_MIN_VARIANCE 1e-6
#define VL_GMM_MIN_POSTERIOR 1e-2
#define VL_GMM_MIN_PRIOR 1e-6
/** @internal @brief GMM object state; allocated by ::vl_gmm_new and
 ** released by ::vl_gmm_delete.  The parameter arrays (means,
 ** covariances, priors, posteriors) are stored with the element type
 ** given by @c dataType. */
struct _VlGMM
{
vl_type dataType ; /**< Data type (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE). */
vl_size dimension ; /**< Data dimensionality. */
vl_size numClusters ; /**< Number of clusters */
vl_size numData ; /**< Number of last time clustered data points. */
vl_size maxNumIterations ; /**< Maximum number of refinement iterations. */
vl_size numRepetitions ; /**< Number of clustering repetitions. */
int verbosity ; /**< Verbosity level. */
void * means; /**< Means of Gaussian modes (numClusters x dimension). */
void * covariances; /**< Diagonals of covariance matrices of Gaussian modes. */
void * priors; /**< Weights of Gaussian modes. */
void * posteriors; /**< Probabilities of correspondences of points to clusters. */
double * sigmaLowBound ; /**< Lower bound on the diagonal covariance values (one per dimension). */
VlGMMInitialization initialization; /**< Initialization option */
VlKMeans * kmeansInit; /**< Kmeans object for initialization of gaussians */
double LL ; /**< Current solution loglikelihood */
vl_bool kmeansInitIsOwner; /**< VL_TRUE when this object created kmeansInit and must delete it. */
} ;
/* ---------------------------------------------------------------- */
/* Life-cycle */
/* ---------------------------------------------------------------- */
/* Ensure the posterior buffer can hold numData x numClusters entries.
 * The allocation only grows (a smaller request reuses the old buffer);
 * the data count is recorded either way. */
static void
_vl_gmm_prepare_for_data (VlGMM* self, vl_size numData)
{
  vl_bool mustGrow = (self->numData < numData) ;
  if (mustGrow) {
    vl_size numBytes = vl_get_type_size(self->dataType) * numData * self->numClusters ;
    vl_free(self->posteriors) ;
    self->posteriors = vl_malloc(numBytes) ;
  }
  self->numData = numData ;
}
/** @brief Create a new GMM object
** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
** @param dimension dimension of the data.
** @param numComponents number of Gaussian mixture components.
** @return new GMM object instance.
**/
/** @brief Create a new GMM object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param dimension dimension of the data.
 ** @param numComponents number of Gaussian mixture components.
 ** @return new GMM object instance.
 **
 ** The object starts with random initialization, 50 EM iterations at
 ** most, one repetition, and a covariance lower bound of 1e-4 in every
 ** dimension.  Release it with ::vl_gmm_delete.
 **/
VlGMM *
vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents)
{
  vl_size typeSize = vl_get_type_size(dataType) ;
  VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ;
  vl_index d ;

  /* scalar configuration */
  self->dataType = dataType ;
  self->dimension = dimension ;
  self->numClusters = numComponents ;
  self->numData = 0 ;
  self->initialization = VlGMMRand ;
  self->verbosity = 0 ;
  self->maxNumIterations = 50 ;
  self->numRepetitions = 1 ;
  self->posteriors = NULL ;
  self->kmeansInit = NULL ;
  self->kmeansInitIsOwner = VL_FALSE ;

  /* parameter storage, zero initialized */
  self->priors = vl_calloc (numComponents, typeSize) ;
  self->means = vl_calloc (numComponents * dimension, typeSize) ;
  self->covariances = vl_calloc (numComponents * dimension, typeSize) ;
  self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ;
  for (d = 0 ; d < (signed)dimension ; ++d) {
    self->sigmaLowBound[d] = 1e-4 ;
  }
  return self ;
}
/** @brief Reset state
** @param self object.
**
** The function reset the state of the GMM object. It deletes
** any stored posterior and other internal state variables.
**/
/** @brief Reset state
 ** @param self object.
 **
 ** Discards the cached posterior probabilities and, when this object
 ** created its own KMeans initializer, deletes that as well, returning
 ** the GMM to a freshly-configured state.
 **/
void
vl_gmm_reset (VlGMM * self)
{
  if (self->posteriors != NULL) {
    vl_free(self->posteriors) ;
    self->posteriors = NULL ;
    self->numData = 0 ;
  }
  if (self->kmeansInitIsOwner && self->kmeansInit != NULL) {
    vl_kmeans_delete(self->kmeansInit) ;
    self->kmeansInit = NULL ;
    self->kmeansInitIsOwner = VL_FALSE ;
  }
}
/** @brief Deletes a GMM object
** @param self GMM object instance.
**
** The function deletes the GMM object instance created
** by ::vl_gmm_new.
**/
/** @brief Deletes a GMM object
 ** @param self GMM object instance.
 **
 ** Releases every buffer owned by the object, the KMeans initializer
 ** when this object created it, and finally the object itself.
 **/
void
vl_gmm_delete (VlGMM * self)
{
  /* guards kept: vl_free's behaviour on NULL is not guaranteed here */
  if (self->means != NULL) { vl_free(self->means) ; }
  if (self->covariances != NULL) { vl_free(self->covariances) ; }
  if (self->priors != NULL) { vl_free(self->priors) ; }
  if (self->posteriors != NULL) { vl_free(self->posteriors) ; }
  if (self->sigmaLowBound != NULL) { vl_free(self->sigmaLowBound) ; }
  /* a user-supplied kmeans object remains the user's responsibility */
  if (self->kmeansInit != NULL && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
  }
  vl_free(self) ;
}
/* ---------------------------------------------------------------- */
/* Getters and setters */
/* ---------------------------------------------------------------- */
/** @brief Get data type
** @param self object
** @return data type.
**/
vl_type
vl_gmm_get_data_type (VlGMM const * self)
{
return self->dataType ;
}
/** @brief Get the number of clusters
** @param self object
** @return number of clusters.
**/
vl_size
vl_gmm_get_num_clusters (VlGMM const * self)
{
return self->numClusters ;
}
/** @brief Get the number of data points
** @param self object
** @return number of data points.
**/
vl_size
vl_gmm_get_num_data (VlGMM const * self)
{
return self->numData ;
}
/** @brief Get the log likelihood of the current mixture
** @param self object
** @return loglikelihood.
**/
double
vl_gmm_get_loglikelihood (VlGMM const * self)
{
return self->LL ;
}
/** @brief Get verbosity level
** @param self object
** @return verbosity level.
**/
int
vl_gmm_get_verbosity (VlGMM const * self)
{
return self->verbosity ;
}
/** @brief Set verbosity level
** @param self object
** @param verbosity verbosity level.
**/
void
vl_gmm_set_verbosity (VlGMM * self, int verbosity)
{
self->verbosity = verbosity ;
}
/** @brief Get means
** @param self object
** @return cluster means.
**/
void const *
vl_gmm_get_means (VlGMM const * self)
{
return self->means ;
}
/** @brief Get covariances
** @param self object
** @return diagonals of cluster covariance matrices.
**/
void const *
vl_gmm_get_covariances (VlGMM const * self)
{
return self->covariances ;
}
/** @brief Get priors
** @param self object
** @return priors of cluster gaussians.
**/
void const *
vl_gmm_get_priors (VlGMM const * self)
{
return self->priors ;
}
/** @brief Get posteriors
** @param self object
** @return posterior probabilities of cluster memberships.
**/
void const *
vl_gmm_get_posteriors (VlGMM const * self)
{
return self->posteriors ;
}
/** @brief Get maximum number of iterations
** @param self object
** @return maximum number of iterations.
**/
vl_size
vl_gmm_get_max_num_iterations (VlGMM const * self)
{
return self->maxNumIterations ;
}
/** @brief Set maximum number of iterations
** @param self VlGMM filter.
** @param maxNumIterations maximum number of iterations.
**/
void
vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations)
{
self->maxNumIterations = maxNumIterations ;
}
/** @brief Get maximum number of repetitions.
** @param self object
** @return current number of repetitions for quantization.
**/
vl_size
vl_gmm_get_num_repetitions (VlGMM const * self)
{
return self->numRepetitions ;
}
/** @brief Set maximum number of repetitions
** @param self object
** @param numRepetitions maximum number of repetitions.
** The number of repetitions cannot be smaller than 1.
**/
void
vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions)
{
assert (numRepetitions >= 1) ;
self->numRepetitions = numRepetitions ;
}
/** @brief Get data dimension
** @param self object
** @return data dimension.
**/
vl_size
vl_gmm_get_dimension (VlGMM const * self)
{
return self->dimension ;
}
/** @brief Get initialization algorithm
** @param self object
** @return initialization algorithm.
**/
VlGMMInitialization
vl_gmm_get_initialization (VlGMM const * self)
{
return self->initialization ;
}
/** @brief Set initialization algorithm.
** @param self object
** @param init initialization algorithm.
**/
void
vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init)
{
self->initialization = init;
}
/** @brief Get KMeans initialization object.
** @param self object
** @return kmeans initialization object.
**/
VlKMeans * vl_gmm_get_kmeans_init_object (VlGMM const * self)
{
return self->kmeansInit;
}
/** @brief Set KMeans initialization object.
** @param self object
** @param kmeans initialization KMeans object.
**
** Ownership of @a kmeans stays with the caller; any previously owned
** initializer is deleted first.
**/
void vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans)
{
if (self->kmeansInit && self->kmeansInitIsOwner) {
vl_kmeans_delete(self->kmeansInit) ;
}
self->kmeansInit = kmeans;
self->kmeansInitIsOwner = VL_FALSE;
}
/** @brief Get the lower bound on the diagonal covariance values.
** @param self object
** @return lower bound on covariances (one entry per dimension).
**/
double const * vl_gmm_get_covariance_lower_bounds (VlGMM const * self)
{
return self->sigmaLowBound;
}
/** @brief Set the lower bounds on diagonal covariance values.
** @param self object.
** @param bounds bounds.
**
** There is one lower bound per dimension. Use ::vl_gmm_set_covariance_lower_bound
** to set all of them to a given scalar.
**/
void vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds)
{
memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ;
}
/** @brief Set the lower bounds on diagonal covariance values.
** @param self object.
** @param bound bound.
**
** While there is one lower bound per dimension, this function sets
** all of them to the specified scalar. Use ::vl_gmm_set_covariance_lower_bounds
** to set them individually.
**/
void vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound)
{
int i ;
for (i = 0 ; i < (signed)self->dimension ; ++i) {
self->sigmaLowBound[i] = bound ;
}
}
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_gmm
#include "shuffle-def.h"
/* #ifdef VL_GMM_INSTANTITATING */
#endif
/* ---------------------------------------------------------------- */
#ifdef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Posterior assignments */
/* ---------------------------------------------------------------- */
/** @fn vl_get_gmm_data_posterior_f(float*,vl_size,vl_size,float const*,float const*,vl_size,float const*,float const*)
** @brief Get Gaussian modes posterior probabilities
** @param posteriors posterior probabilities (output).
** @param numClusters number of modes in the GMM model.
** @param numData number of data elements.
** @param priors prior mode probabilities of the GMM model.
** @param means means of the GMM model.
** @param dimension data dimension.
** @param covariances diagonal covariances of the GMM model.
** @param data data.
** @return data log-likelihood.
**
** This is a helper function that does not require a ::VlGMM object
** instance to operate.
**/
double
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)
(TYPE * posteriors,
vl_size numClusters,
vl_size numData,
TYPE const * priors,
TYPE const * means,
vl_size dimension,
TYPE const * covariances,
TYPE const * data)
{
vl_index i_d, i_cl;
vl_size dim;
double LL = 0;
/* constant part of the log Gaussian density: (d/2) log(2 pi) */
TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI);
TYPE * logCovariances ;
TYPE * logWeights ;
TYPE * invCovariances ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ;
#else
VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ;
#endif
logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ;
invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ;
logWeights = vl_malloc(sizeof(TYPE) * numClusters) ;
/* Precompute, per cluster: log prior, log det(Sigma), and elementwise
1/sigma^2 (the covariances are diagonal, so the determinant reduces to
a product of the diagonal and the inverse is elementwise). */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads())
#endif
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE logSigma = 0 ;
if (priors[i_cl] < VL_GMM_MIN_PRIOR) {
/* negligible prior: weight of -inf ensures the mode never contributes */
logWeights[i_cl] = - (TYPE) VL_INFINITY_D ;
} else {
logWeights[i_cl] = log(priors[i_cl]);
}
for(dim = 0 ; dim < dimension ; ++ dim) {
logSigma += log(covariances[i_cl*dimension + dim]);
invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim];
}
logCovariances[i_cl] = logSigma;
} /* end of parallel region */
/* For each point: evaluate the unnormalized log posterior of every
cluster, then normalize with the log-sum-exp trick (subtracting the
per-point maximum) so the Gaussian densities cannot underflow. */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \
num_threads(vl_get_max_threads())
#endif
for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) {
TYPE clusterPosteriorsSum = 0;
TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE p =
logWeights[i_cl]
- halfDimLog2Pi
- 0.5 * logCovariances[i_cl]
- 0.5 * distFn (dimension,
data + i_d * dimension,
means + i_cl * dimension,
invCovariances + i_cl * dimension) ;
posteriors[i_cl + i_d * numClusters] = p ;
if (p > maxPosterior) { maxPosterior = p ; }
}
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
p = exp(p - maxPosterior) ;
posteriors[i_cl + i_d * numClusters] = p ;
clusterPosteriorsSum += p ;
}
/* log-likelihood contribution of point i_d (maximum restored) */
LL += log(clusterPosteriorsSum) + (double) maxPosterior ;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ;
}
} /* end of parallel region */
vl_free(logCovariances);
vl_free(logWeights);
vl_free(invCovariances);
return LL;
}
/* ---------------------------------------------------------------- */
/* Restarts zero-weighted Gaussians */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
TYPE * posteriors,
TYPE * priors,
TYPE * covariances,
TYPE * means,
TYPE const * data,
vl_size numData) ;
/* Restart Gaussian modes that have become (nearly) empty.
 *
 * A mode is considered empty when the total posterior mass it receives
 * falls below VL_GMM_MIN_POSTERIOR * max(1, numData/numClusters). Each
 * empty mode is re-seeded by splitting the mode that contributes most to
 * the (approximate) entropy of the mixture, reassigning that mode's
 * points on either side of the median-like threshold along its highest
 * variance dimension, and re-running the M step.
 *
 * @param self  GMM object (priors/means/covariances/posteriors are updated
 *              in place).
 * @param data  data matrix, self->dimension by self->numData.
 * @return the number of modes that were restarted.
 *
 * Fixes vs. original: (1) `mass` was allocated with vl_calloc but never
 * freed (leaked on every call, including the numClusters <= 1 early
 * return); (2) `zeroWNum` was only incremented inside the disabled
 * `#if 0` block, so the function always returned 0 and the caller could
 * never report how many modes were restarted. */
static vl_size
VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data)
{
  vl_size dimension = self->dimension;
  vl_size numClusters = self->numClusters;
  vl_index i_cl, j_cl, i_d, d;
  vl_size zeroWNum = 0;
  TYPE * priors = (TYPE*)self->priors ;
  TYPE * means = (TYPE*)self->means ;
  TYPE * covariances = (TYPE*)self->covariances ;
  TYPE * posteriors = (TYPE*)self->posteriors ;
  //VlRand * rand = vl_get_rand() ;
  TYPE * mass ;

  /* nothing can be split with a single mode */
  if (numClusters <= 1) { return 0 ; }

  /* allocated after the trivial-exit check so it cannot leak there */
  mass = vl_calloc(sizeof(TYPE), self->numClusters) ;

  /* compute statistics: per-mode posterior mass and posterior sparsity */
  {
    vl_uindex i, k ;
    vl_size numNullAssignments = 0 ;
    for (i = 0 ; i < self->numData ; ++i) {
      for (k = 0 ; k < self->numClusters ; ++k) {
        TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ;
        mass[k] += p ;
        if (p < VL_GMM_MIN_POSTERIOR) {
          numNullAssignments ++ ;
        }
      }
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ;
    }
  }
#if 0
  /* search for cluster with negligible weight and reassign them to fat clusters */
  for (i_cl = 0 ; i_cl < numClusters ; ++i_cl) {
    if (priors[i_cl] < 0.00001/numClusters) {
      double mass = priors[0] ;
      vl_index best = 0 ;
      for (j_cl = 1 ; j_cl < numClusters ; ++j_cl) {
        if (priors[j_cl] > mass) { mass = priors[j_cl] ; best = j_cl ; }
      }
      if (j_cl == i_cl) {
        /* this should never happen */
        continue ;
      }
      j_cl = best ;
      zeroWNum ++ ;
      VL_PRINTF("gmm: restarting mode %d by splitting mode %d (with prior %f)\n", i_cl,j_cl,mass) ;
      priors[i_cl] = mass/2 ;
      priors[j_cl] = mass/2 ;
      for (d = 0 ; d < dimension ; ++d) {
        TYPE sigma2 = covariances[j_cl*dimension + d] ;
        TYPE sigma = VL_XCAT(vl_sqrt_,SFX)(sigma2) ;
        means[i_cl*dimension + d] = means[j_cl*dimension + d] + 0.001 * (vl_rand_real1(rand) - 0.5) * sigma ;
        covariances[i_cl*dimension + d] = sigma2 ;
      }
    }
  }
#endif
  /* search for cluster with negligible weight and reassign them to fat clusters */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
    double size = - VL_INFINITY_D ;
    vl_index best = -1 ;

    /* skip modes that still carry a meaningful share of the data */
    if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR *
        VL_MAX(1.0, (double) self->numData / self->numClusters))
    {
      continue ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ;
    }
    /*
      Search for the Gaussian components that (approximately)
      maximally contribute to make the negative log-likelihood of the data
      large. Then split the worst offender.

      To do so, we approximate the expected log-likelihood of the GMM:

        E[-log(f(x))] = H(f) = - log \int f(x) log f(x)

      where the density f(x) = sum_k pk gk(x) is a GMM. This is intractable
      but it is easy to approximate if we suppose that supp gk is disjoint with
      supp gq for all components k ~= q. In this case

        H(f) ~= sum_k [ - pk log(pk) + pk H(gk) ]

      where H(gk) is the entropy of component k taken alone. The entropy of
      the latter is given by:

        H(gk) = D/2 (1 + log(2pi)) + 1/2 sum_{i=0}^D log sigma_i^2
    */
    for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) {
      double size_ ;
      if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; }
      size_ = + 0.5 * dimension * (1.0 + log(2*VL_PI)) ;
      for(d = 0 ; d < (signed)dimension ; d++) {
        double sigma2 = covariances[j_cl * dimension + d] ;
        size_ += 0.5 * log(sigma2) ;
      }
      size_ = priors[j_cl] * (size_ - log(priors[j_cl])) ;
      if (self->verbosity > 1) {
        VL_PRINTF("gmm: mode %d: prior %f, mass %f, entropy contribution %f\n",
                  j_cl, priors[j_cl], mass[j_cl], size_) ;
      }
      if (size_ > size) {
        size = size_ ;
        best = j_cl ;
      }
    }
    j_cl = best ;
    if (j_cl == i_cl || j_cl < 0) {
      if (self->verbosity) {
        VL_PRINTF("gmm: mode %d is empty, "
                  "but no other mode to split could be found\n", i_cl) ;
      }
      continue ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n",
                i_cl, j_cl, priors[j_cl], mass[j_cl], size) ;
    }
    /*
      Search for the dimension with maximum variance.
    */
    size = - VL_INFINITY_D ;
    best = - 1 ;
    for(d = 0; d < (signed)dimension; d++) {
      double sigma2 = covariances[j_cl * dimension + d] ;
      if (sigma2 > size) {
        size = sigma2 ;
        best = d ;
      }
    }
    /*
      Reassign points j_cl (mode to split) to i_cl (empty mode):
      points below the split mode's mean along its largest-variance
      dimension go to the empty mode, the rest stay.
    */
    {
      TYPE mu = means[best + j_cl * self->dimension] ;
      for(i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) {
        TYPE p = posteriors[j_cl + self->numClusters * i_d] ;
        TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */
        if (data[best + i_d * self->dimension] < mu) {
          /* assign this point to i_cl */
          posteriors[i_cl + self->numClusters * i_d] = p + q ;
          posteriors[j_cl + self->numClusters * i_d] = 0 ;
        } else {
          /* assign this point to j_cl */
          posteriors[i_cl + self->numClusters * i_d] = 0 ;
          posteriors[j_cl + self->numClusters * i_d] = p + q ;
        }
      }
    }
    /* fix: count the restart on the live path (was only done in #if 0) */
    zeroWNum ++ ;
    /*
      Re-estimate.
    */
    VL_XCAT(_vl_gmm_maximization_, SFX)
      (self,posteriors,priors,covariances,means,data,self->numData) ;
  }
  vl_free(mass) ; /* fix: was leaked */
  return zeroWNum;
}
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
/* Clamp every covariance diagonal entry to its per-dimension lower bound
 * (self->sigmaLowBound) and report, at verbosity > 0, how many modes had
 * at least one entry adjusted. */
static void
VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self)
{
  vl_size numAdjusted = 0 ;
  TYPE * covariances = (TYPE*)self->covariances ;
  double const * lowerBounds = self->sigmaLowBound ;
  vl_uindex cluster ;

  for (cluster = 0 ; cluster < self->numClusters ; ++cluster) {
    vl_bool touched = VL_FALSE ;
    vl_uindex d ;
    for (d = 0 ; d < self->dimension ; ++d) {
      TYPE * sigma2 = covariances + cluster * self->dimension + d ;
      if (*sigma2 < lowerBounds[d]) {
        *sigma2 = lowerBounds[d] ;
        touched = VL_TRUE ;
      }
    }
    if (touched) { ++ numAdjusted ; }
  }

  if (numAdjusted > 0 && self->verbosity > 0) {
    VL_PRINT("gmm: detected %d of %d modes with at least one dimension "
             "with covariance too small (set to lower bound)\n",
             numAdjusted, self->numClusters) ;
  }
}
/* ---------------------------------------------------------------- */
/* EM - Maximization step */
/* ---------------------------------------------------------------- */
/* EM maximization (M) step: given the current data-to-mode posteriors,
 * re-estimate the priors, means, and diagonal covariances of all modes.
 *
 * @param self        GMM object (dimension/numClusters/verbosity read).
 * @param posteriors  numClusters x numData posterior matrix (read only here).
 * @param priors      output: per-mode priors, normalized to sum to one.
 * @param covariances output: per-mode diagonal covariances.
 * @param means       output: per-mode means.
 * @param data        dimension x numData data matrix.
 * @param numData     number of data points.
 *
 * Squared differences are accumulated w.r.t. the OLD means and corrected
 * afterwards, which avoids a second pass over the data (see comment below). */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
 TYPE * posteriors,
 TYPE * priors,
 TYPE * covariances,
 TYPE * means,
 TYPE const * data,
 vl_size numData)
{
  vl_size numClusters = self->numClusters;
  vl_index i_d, i_cl;
  vl_size dim ;
  TYPE * oldMeans ;
  double time = 0 ;
  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: entering maximization step\n") ;
    time = vl_get_cpu_time() ;
  }
  /* keep a copy of the previous means; accumulators start from zero */
  oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ;
  memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ;
  memset(priors, 0, sizeof(TYPE) * numClusters) ;
  memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ;
  memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ;
#if defined(_OPENMP)
#pragma omp parallel default(shared) private(i_d, i_cl, dim) \
  num_threads(vl_get_max_threads())
#endif
  {
    /* per-thread accumulators, merged under the critical section below */
    TYPE * clusterPosteriorSum_, * means_, * covariances_ ;
#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      /* NOTE(review): vl_calloc is called as (sizeof(TYPE), count) here,
         the reverse of the usual (count, size) order; the total byte count
         is the same either way — confirm against vl_calloc's signature */
      clusterPosteriorSum_ = vl_calloc(sizeof(TYPE), numClusters) ;
      means_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
      covariances_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
    }
    /*
      Accumulate weighted sums and sum of square differences. Once normalized,
      these become the means and covariances of each Gaussian mode.

      The squared differences will be taken w.r.t. the old means however. In this manner,
      one avoids doing two passes across the data. Eventually, these are corrected to account
      for the new means properly. In principle, one could set the old means to zero, but
      this may cause numerical instabilities (by accumulating large squares).
    */
#if defined(_OPENMP)
#pragma omp for
#endif
    for (i_d = 0 ; i_d < (signed)numData ; ++i_d) {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        TYPE p = posteriors[i_cl + i_d * self->numClusters] ;
        vl_bool calculated = VL_FALSE ;
        /* skip very small associations for speed */
        if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; }
        clusterPosteriorSum_ [i_cl] += p ;
        /* prefer AVX, then SSE2, then the scalar fallback */
#ifndef VL_DISABLE_AVX
        if (vl_get_simd_enabled() && vl_cpu_has_avx()) {
          VL_XCAT(_vl_weighted_mean_avx_, SFX)
            (self->dimension,
             means_+ i_cl * self->dimension,
             data + i_d * self->dimension,
             p) ;
          VL_XCAT(_vl_weighted_sigma_avx_, SFX)
            (self->dimension,
             covariances_ + i_cl * self->dimension,
             data + i_d * self->dimension,
             oldMeans + i_cl * self->dimension,
             p) ;
          calculated = VL_TRUE;
        }
#endif
#ifndef VL_DISABLE_SSE2
        if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) {
          VL_XCAT(_vl_weighted_mean_sse2_, SFX)
            (self->dimension,
             means_+ i_cl * self->dimension,
             data + i_d * self->dimension,
             p) ;
          VL_XCAT(_vl_weighted_sigma_sse2_, SFX)
            (self->dimension,
             covariances_ + i_cl * self->dimension,
             data + i_d * self->dimension,
             oldMeans + i_cl * self->dimension,
             p) ;
          calculated = VL_TRUE;
        }
#endif
        if(!calculated) {
          /* scalar fallback: weighted sum and squared deviation from old mean */
          for (dim = 0 ; dim < self->dimension ; ++dim) {
            TYPE x = data[i_d * self->dimension + dim] ;
            TYPE mu = oldMeans[i_cl * self->dimension + dim] ;
            TYPE diff = x - mu ;
            means_ [i_cl * self->dimension + dim] += p * x ;
            covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ;
          }
        }
      }
    }
    /* accumulate */
#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        priors [i_cl] += clusterPosteriorSum_ [i_cl];
        for (dim = 0 ; dim < self->dimension ; ++dim) {
          means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ;
          covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ;
        }
      }
      vl_free(means_);
      vl_free(covariances_);
      vl_free(clusterPosteriorSum_);
    }
  } /* parallel section */
  /* at this stage priors[] contains the total mass of each cluster */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    /* do not update modes that do not recieve mass */
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        means[i_cl * self->dimension + dim] /= mass ;
        covariances[i_cl * self->dimension + dim] /= mass ;
      }
    }
  }
  /* apply old to new means correction: E[(x-mu)^2] = E[(x-mu_old)^2] - (mu-mu_old)^2 */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        TYPE mu = means[i_cl * self->dimension + dim] ;
        TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ;
        TYPE diff = mu - oldMu ;
        covariances[i_cl * self->dimension + dim] -= diff * diff ;
      }
    }
  }
  /* clamp covariances to their lower bounds */
  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;
  /* normalize the priors to sum to one (guard against an all-zero sum) */
  {
    TYPE sum = 0;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      sum += priors[i_cl] ;
    }
    sum = VL_MAX(sum, 1e-12) ;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      priors[i_cl] /= sum ;
    }
  }
  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: maximization step completed in %.2f s\n",
              vl_get_cpu_time() - time) ;
  }
  vl_free(oldMeans);
}
/* ---------------------------------------------------------------- */
/* EM iterations */
/* ---------------------------------------------------------------- */
/* Run EM iterations until the log-likelihood converges (relative variation
 * below 1e-5) or self->maxNumIterations is reached.
 *
 * @param self    GMM object; posteriors/priors/means/covariances updated.
 * @param data    dimension x numData data matrix.
 * @param numData number of data points.
 * @return final log-likelihood of the data under the model.
 *
 * Fixes vs. original: logical && instead of bitwise & in the restart
 * report condition, and vl_size values are cast to int to match the %d
 * printf specifiers (a format/argument type mismatch is undefined
 * behavior on platforms where vl_size is 64-bit). */
static double
VL_XCAT(_vl_gmm_em_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size iteration, restarted ;
  double previousLL = (TYPE)(-VL_INFINITY_D) ;
  double LL = (TYPE)(-VL_INFINITY_D) ;
  double time = 0 ;

  _vl_gmm_prepare_for_data (self, numData) ;
  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;

  for (iteration = 0 ; 1 ; ++ iteration) {
    double eps ;

    /*
      Expectation: assign data to Gaussian modes
      and compute log-likelihood.
    */
    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: entering expectation step\n") ;
      time = vl_get_cpu_time() ;
    }
    LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX)
      (self->posteriors,
       self->numClusters,
       numData,
       self->priors,
       self->means,
       self->dimension,
       self->covariances,
       data) ;
    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: expectation step completed in %.2f s\n",
                vl_get_cpu_time() - time) ;
    }

    /*
      Check the termination conditions.
    */
    if (self->verbosity) {
      VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n",
                (int)iteration, LL, LL - previousLL) ;
    }
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because "
                  "the maximum number of iterations "
                  "(%d) has been reached.\n", (int)self->maxNumIterations) ;
      }
      break ;
    }
    /* relative variation of the log-likelihood; only meaningful after the
       first iteration (previousLL starts at -infinity) */
    eps = vl_abs_d ((LL - previousLL) / (LL));
    if ((iteration > 0) && (eps < 0.00001)) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because the algorithm "
                  "fully converged (log-likelihood variation = %f).\n", eps) ;
      }
      break ;
    }
    previousLL = LL ;

    /*
      Restart empty modes.
    */
    if (iteration > 1) {
      restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX)
        (self, data);
      if ((restarted > 0) && (self->verbosity > 0)) {
        VL_PRINTF("gmm: em: %d Gaussian modes restarted because "
                  "they had become empty.\n", (int)restarted);
      }
    }

    /*
      Maximization: reestimate the GMM parameters.
    */
    VL_XCAT(_vl_gmm_maximization_, SFX)
      (self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ;
  }
  return LL;
}
/* ---------------------------------------------------------------- */
/* Kmeans initialization of mixtures */
/* ---------------------------------------------------------------- */
/* Initialize the GMM by running k-means on the data: each point gets a
 * hard (0/1) posterior for the cluster k-means assigns it to, and the
 * mode parameters are then obtained with one M step.
 *
 * @param self       GMM object to initialize.
 * @param data       dimension x numData data matrix.
 * @param numData    number of data points.
 * @param kmeansInit optional pre-configured KMeans object; if NULL and the
 *                   GMM has none, a default ANN k-means is created here
 *                   (owned by the GMM). */
static void
VL_XCAT(_vl_gmm_init_with_kmeans_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  vl_size i_d ;
  vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData);

  _vl_gmm_prepare_for_data (self, numData) ;

  /* start from a clean slate: all parameters and posteriors zeroed */
  memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->priors,0,sizeof(TYPE) * self->numClusters) ;
  memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ;

  /* setup specified KMeans initialization object if any */
  if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; }

  /* if a KMeans initialization object is still unavailable, create one */
  if(self->kmeansInit == NULL) {
    vl_size ncomparisons = VL_MAX(numData / 4, 10) ;
    vl_size niter = 5 ;
    vl_size ntrees = 1 ;
    vl_size nrepetitions = 1 ;
    VlKMeansAlgorithm algorithm = VlKMeansANN ;
    VlKMeansInitialization initialization = VlKMeansRandomSelection ;

    VlKMeans * kmeansInitDefault = vl_kmeans_new(self->dataType,VlDistanceL2) ;
    vl_kmeans_set_initialization(kmeansInitDefault, initialization);
    vl_kmeans_set_max_num_iterations (kmeansInitDefault, niter) ;
    vl_kmeans_set_max_num_comparisons (kmeansInitDefault, ncomparisons) ;
    vl_kmeans_set_num_trees (kmeansInitDefault, ntrees);
    vl_kmeans_set_algorithm (kmeansInitDefault, algorithm);
    vl_kmeans_set_num_repetitions(kmeansInitDefault, nrepetitions);
    vl_kmeans_set_verbosity (kmeansInitDefault, self->verbosity);

    self->kmeansInit = kmeansInitDefault;
    self->kmeansInitIsOwner = VL_TRUE ;
  }

  /* Use k-means to assign data to clusters */
  vl_kmeans_cluster (self->kmeansInit, data, self->dimension, numData, self->numClusters);
  vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ;

  /* Transform the k-means assignments into hard posteriors */
  for(i_d = 0; i_d < numData; i_d++) {
    ((TYPE*)self->posteriors)[assignments[i_d] + i_d * self->numClusters] = (TYPE) 1.0 ;
  }

  /* Update cluster parameters with a single maximization step */
  VL_XCAT(_vl_gmm_maximization_, SFX)
    (self,self->posteriors,self->priors,self->covariances,self->means,data,numData);
  vl_free(assignments) ;
}
/* ---------------------------------------------------------------- */
/* Random initialization of mixtures */
/* ---------------------------------------------------------------- */
/* Compute the per-dimension variance of the whole dataset into initSigma
 * (used to seed the mode covariances).
 *
 * @param self      GMM object (kept for signature compatibility; unused).
 * @param data      dimension x numData data matrix.
 * @param initSigma output buffer of `dimension` variances; zeroed first.
 * @param dimension data dimension.
 * @param numData   number of points; if <= 1 the variances stay zero
 *                  (also avoids the numData-1 division below).
 *
 * Fix vs. original: the variance loop indexed data with self->dimension
 * while the mean loop used the `dimension` parameter; both now use the
 * parameter consistently. */
static void
VL_XCAT(_vl_gmm_compute_init_sigma_, SFX)
(VlGMM * self,
 TYPE const * data,
 TYPE * initSigma,
 vl_size dimension,
 vl_size numData)
{
  vl_size dim;
  vl_uindex i;
  TYPE * dataMean ;

  (void) self ; /* no longer read; parameter kept so callers are unchanged */

  memset(initSigma,0,sizeof(TYPE)*dimension) ;
  if (numData <= 1) return ;

  dataMean = vl_malloc(sizeof(TYPE)*dimension);
  memset(dataMean,0,sizeof(TYPE)*dimension) ;

  /* find mean of the whole dataset */
  for(dim = 0 ; dim < dimension ; dim++) {
    for(i = 0 ; i < numData ; i++) {
      dataMean[dim] += data[i*dimension + dim];
    }
    dataMean[dim] /= numData;
  }

  /* compute the (unbiased, /(n-1)) variance of the whole dataset */
  for(dim = 0; dim < dimension; dim++) {
    for(i = 0; i < numData; i++) {
      TYPE diff = (data[i*dimension + dim] - dataMean[dim]) ;
      initSigma[dim] += diff*diff ;
    }
    initSigma[dim] /= numData - 1 ;
  }

  vl_free(dataMean) ;
}
/* Initialize the GMM from the data alone: uniform priors, covariance
 * diagonals set to the dataset variance, and means picked with the
 * kmeans++ seeding rule. */
static void
VL_XCAT(_vl_gmm_init_with_rand_data_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_uindex cluster, j ;
  TYPE * priors = (TYPE*)self->priors ;
  TYPE * covariances = (TYPE*)self->covariances ;
  VlKMeans * seeder ;

  _vl_gmm_prepare_for_data(self, numData) ;

  /* equal priors summing to one */
  for (j = 0 ; j < self->numClusters ; ++j) {
    priors[j] = (TYPE) (1.0 / self->numClusters) ;
  }

  /* dataset variance goes into mode 0, then is replicated to every mode */
  VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, self->dimension, numData);
  for (cluster = 1 ; cluster < self->numClusters ; ++cluster) {
    memcpy(covariances + cluster * self->dimension,
           covariances,
           sizeof(TYPE) * self->dimension) ;
  }

  /* use kmeans++ seeding to pick the means at random from the data */
  seeder = vl_kmeans_new(self->dataType,VlDistanceL2) ;
  vl_kmeans_init_centers_plus_plus(seeder, data, self->dimension, numData, self->numClusters) ;
  memcpy(self->means, vl_kmeans_get_centers(seeder), sizeof(TYPE) * self->dimension * self->numClusters) ;
  vl_kmeans_delete(seeder) ;
}
/* ---------------------------------------------------------------- */
#else /* VL_GMM_INSTANTIATING */
/* ---------------------------------------------------------------- */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#endif
/* VL_GMM_INSTANTIATING */
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/** @brief Create a new GMM object by copy
** @param self object.
** @return new copy.
**
** Most parameters, including the cluster priors, means, and
** covariances are copied. Data posteriors (available after
** initalization or EM) are not; nor is the KMeans object used for
** initialization, if any.
**/
VlGMM *
vl_gmm_new_copy (VlGMM const * self)
{
  /* bytes per scalar, and per numClusters-sized parameter block */
  vl_size scalarSize = vl_get_type_size(self->dataType) ;
  vl_size priorBytes = scalarSize * self->numClusters ;
  vl_size paramBytes = priorBytes * self->dimension ;

  VlGMM * copy = vl_gmm_new(self->dataType, self->dimension, self->numClusters);

  /* scalar configuration and state */
  copy->initialization = self->initialization;
  copy->maxNumIterations = self->maxNumIterations;
  copy->numRepetitions = self->numRepetitions;
  copy->verbosity = self->verbosity;
  copy->LL = self->LL;

  /* model parameters; posteriors and the KMeans init object are
     intentionally not copied (see the doc comment above) */
  memcpy(copy->means, self->means, paramBytes);
  memcpy(copy->covariances, self->covariances, paramBytes);
  memcpy(copy->priors, self->priors, priorBytes);
  return copy ;
}
/** @brief Initialize mixture before EM takes place using random initialization
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
void
vl_gmm_init_with_rand_data
(VlGMM * self,
 void const * data,
 vl_size numData)
{
  /* drop any previous state, then dispatch on the numeric data type */
  vl_gmm_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ;
  } else {
    abort() ;
  }
}
/** @brief Initializes the GMM using KMeans
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
** @param kmeansInit KMeans object to use.
**/
void
vl_gmm_init_with_kmeans
(VlGMM * self,
 void const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  /* drop any previous state, then dispatch on the numeric data type */
  vl_gmm_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_gmm_init_with_kmeans_f
      (self, (float const *)data, numData, kmeansInit) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_gmm_init_with_kmeans_d
      (self, (double const *)data, numData, kmeansInit) ;
  } else {
    abort() ;
  }
}
#if 0
#include<fenv.h>
#endif
/** @brief Run GMM clustering - includes initialization and EM
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_cluster (VlGMM * self,
                       void const * data,
                       vl_size numData)
{
  /* shadow buffers holding the best model found so far; swapped with the
     GMM's own buffers whenever a repetition improves the log-likelihood */
  void * bestPriors = NULL ;
  void * bestMeans = NULL;
  void * bestCovariances = NULL;
  void * bestPosteriors = NULL;
  vl_size size = vl_get_type_size(self->dataType) ;
  double bestLL = -VL_INFINITY_D;
  vl_uindex repetition;

  assert(self->numRepetitions >=1) ;

  bestPriors = vl_malloc(size * self->numClusters) ;
  bestMeans = vl_malloc(size * self->dimension * self->numClusters) ;
  bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ;
  bestPosteriors = vl_malloc(size * self->numClusters * numData) ;

#if 0
  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
#endif

  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double LL ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    /* initialize a new mixture model */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ;
      case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ;
      case VlGMMCustom : break ; /* caller supplied the parameters */
      default: abort() ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: model initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    /* fit the model to data by running EM */
    timeRef = vl_get_cpu_time () ;
    LL = vl_gmm_em (self, data, numData) ;
    if (self->verbosity) {
      VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n",
                vl_get_cpu_time() - timeRef, LL) ;
    }

    /* keep the better model by swapping buffer pointers (no copying);
       the `repetition == 0` clause guarantees the best buffers hold a
       valid model even if LL never exceeds the -inf start value */
    if (LL > bestLL || repetition == 0) {
      void * temp ;

      temp = bestPriors ;
      bestPriors = self->priors ;
      self->priors = temp ;

      temp = bestMeans ;
      bestMeans = self->means ;
      self->means = temp ;

      temp = bestCovariances ;
      bestCovariances = self->covariances ;
      self->covariances = temp ;

      temp = bestPosteriors ;
      bestPosteriors = self->posteriors ;
      self->posteriors = temp ;

      bestLL = LL;
    }
  }

  /* install the best model into the GMM and discard the scratch buffers
     (which, after the swaps, hold the last non-best model) */
  vl_free (self->priors) ;
  vl_free (self->means) ;
  vl_free (self->covariances) ;
  vl_free (self->posteriors) ;

  self->priors = bestPriors ;
  self->means = bestMeans ;
  self->covariances = bestCovariances ;
  self->posteriors = bestPosteriors ;
  self->LL = bestLL;

  if (self->verbosity) {
    VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ;
  }

  return bestLL ;
}
/** @brief Invoke the EM algorithm.
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_em (VlGMM * self, void const * data, vl_size numData)
{
  /* dispatch on the numeric data type of the model */
  if (self->dataType == VL_TYPE_FLOAT) {
    return _vl_gmm_em_f (self, (float const *)data, numData) ;
  }
  if (self->dataType == VL_TYPE_DOUBLE) {
    return _vl_gmm_em_d (self, (double const *)data, numData) ;
  }
  abort() ;
  return 0 ;
}
/** @brief Explicitly set the initial means for EM.
** @param self GMM object instance.
** @param means initial values of means.
**/
void
vl_gmm_set_means (VlGMM * self, void const * means)
{
  /* dimension * numClusters scalars of the model's data type */
  vl_size numBytes = self->dimension * self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->means, means, numBytes);
}
/** @brief Explicitly set the initial sigma diagonals for EM.
** @param self GMM object instance.
** @param covariances initial values of covariance matrix diagonals.
**/
void vl_gmm_set_covariances (VlGMM * self, void const * covariances)
{
  /* one diagonal of `dimension` scalars per cluster */
  vl_size numBytes = self->dimension * self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->covariances, covariances, numBytes);
}
/** @brief Explicitly set the initial priors of the gaussians.
** @param self GMM object instance.
** @param priors initial values of the gaussian priors.
**/
void vl_gmm_set_priors (VlGMM * self, void const * priors)
{
  /* one scalar per cluster */
  vl_size numBytes = self->numClusters * vl_get_type_size(self->dataType) ;
  memcpy(self->priors, priors, numBytes);
}
/* VL_GMM_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_GMM_INSTANTIATING
|
zkbdf_verifyPseudo.c | /*
Name: zkbdf_verifyPseudo.c
Author: Tan Teik Guan
Description: Verify function for VDF realization using ZKBoo. Modified from MPC_SHA256_VERIFIER.c
*/
/*
============================================================================
Name : MPC_SHA256_VERIFIER.c
Author : Sobuno
Version : 0.1
Description : Verifies a proof for SHA-256 generated by MPC_SHA256.c
============================================================================
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "shared.h"
int NUM_ROUNDS = 100;
int NUM_LOOPS = 1;
/* Print the binary digits of n to stdout, most significant set bit first.
 * Prints nothing at all when n is 0 (same as the recursive original). */
void printbits(uint32_t n) {
	int top = 31;
	if (n == 0) {
		return;
	}
	/* locate the highest set bit, then emit downwards */
	while (((n >> top) & 1u) == 0) {
		top--;
	}
	for (; top >= 0; top--) {
		printf("%d", (int)((n >> top) & 1u));
	}
}
/* Entry point: load a ZKBoo proof from out<NUM_ROUNDS>.bin, check the
 * reconstructed H(ek) against a locally computed SHA-256 of the evaluation
 * key, then verify every round of the proof (timed over NUM_LOOPS passes).
 * Returns -1 on usage/IO/hash errors, EXIT_SUCCESS otherwise. */
int main(int argc, char * argv[]) {
	setbuf(stdout, NULL); /* unbuffered stdout so progress shows immediately */
	init_EVP();           /* OpenSSL / thread setup helpers from shared.h */
	openmp_thread_setup();

	char CHALLENGE[BLOCK_SIZE];
	char ek[BLOCK_SIZE];

	if (argc != 4)
	{
		printf("Usage: %s <number of rounds (e.g. 20, 40, 60, 80, 100)> <challenge (Max %d char> <eval key (Max %d char)>\n",argv[0],MSG_SIZE,MSG_SIZE);
		return -1;
	}
	NUM_ROUNDS = atoi(argv[1]);
	/* zero first so the strncpy'd strings are NUL-terminated as long as
	   MSG_SIZE < BLOCK_SIZE — NOTE(review): confirm that relation in shared.h,
	   since strncpy alone does not guarantee termination */
	memset(CHALLENGE,0,sizeof(CHALLENGE));
	strncpy(CHALLENGE,argv[2],MSG_SIZE);
	memset(ek,0,sizeof(ek));
	strncpy(ek,argv[3],MSG_SIZE);
	printf("Iterations of SHA: %d\n", NUM_ROUNDS);

	int i;
	i = strlen(ek);
	printf("length of ek: %d\n",i);
	/* copy the evaluation key into an unsigned byte buffer for hashing */
	unsigned char input[BLOCK_SIZE];
	memset(input,0,sizeof(input));
	for (int j=0;j<i;j++)
		input[j] = ek[j];

	/* proof transcript: one commitment (a) and one opened view (z) per round */
	a as[NUM_ROUNDS];
	z zs[NUM_ROUNDS];
	FILE *file;
	int failed = 0;

	char outputFile[3*sizeof(int) + 8];
	sprintf(outputFile, "out%i.bin", NUM_ROUNDS);
	file = fopen(outputFile, "rb");
	if (!file) {
		printf("Unable to open file!");
		return -1;
	}
	/* NOTE(review): fread return values are not checked; a short or corrupt
	   file silently yields garbage proofs here */
	fread(&as, sizeof(a), NUM_ROUNDS, file);
	fread(&zs, sizeof(z), NUM_ROUNDS, file);
	fclose(file);

	struct timeval begin, delta;
	gettimeofday(&begin,NULL);
	for(int loops=0;loops<NUM_LOOPS;loops++)
	{
		/* recombine the three MPC shares into the two claimed outputs */
		uint32_t y1[8];
		uint32_t y2[8];
		reconstruct(as[0].yp1[0],as[0].yp1[1],as[0].yp1[2],y1);
		reconstruct(as[0].yp2[0],as[0].yp2[1],as[0].yp2[2],y2);

		printf("Received output for H(ek): ");
		for(int i=0;i<8;i++) {
			printf("%02X", y1[i]);
		}
		printf("\n");
		printf("Received output for Hmac(ek,challenge): ");
		for(int i=0;i<8;i++) {
			printf("%02X", y2[i]);
		}
		printf("\n");

		/* recompute SHA256(ek) locally and compare word-by-word with y1 */
		{
			SHA256_CTX ctx;
			unsigned char expectedhash[SHA256_DIGEST_LENGTH];
			int l;

			SHA256_Init(&ctx);
			/* NOTE(review): strlen on an unsigned char* relies on the implicit
			   pointer conversion; input is NUL-padded by the memset above */
			SHA256_Update(&ctx, input, strlen(input));
			SHA256_Final(expectedhash, &ctx);
			for (l=0;l<8;l++)
			{
				uint32_t temp;
				// to take care of big endian
				unsigned char tempc[4];
				tempc[0] = expectedhash[l*4+3];
				tempc[1] = expectedhash[l*4+2];
				tempc[2] = expectedhash[l*4+1];
				tempc[3] = expectedhash[l*4];
				memcpy(&temp,tempc,4);
				if (temp != y1[l])
				{
					printf("hash does not match !!\n");
					return -1;
				}
			}
		}

		int es[NUM_ROUNDS*2];
		unsigned char plaintext[NUM_ROUNDS][16];
		/* round 0 plaintext is sixteen '0' bytes; later rounds are chained
		   from the hash of the previous round's opened view (see loop) */
		memset(&(plaintext[0]),0x30,16);
#pragma omp parallel for
		for (int i=0; i<(NUM_ROUNDS); i++)
		{
			if (i < (NUM_ROUNDS-1))
			{
				/* iteration i fills plaintext[i+1]; the two parallel loops are
				   separate regions, so all writes finish before the reads below */
				SHA256_CTX ctx;
				unsigned char prevroundhash[SHA256_DIGEST_LENGTH];

				SHA256_Init(&ctx);
				SHA256_Update(&ctx, &(zs[i]), sizeof(z));
				SHA256_Final(prevroundhash, &ctx);
				memcpy(plaintext[i+1],prevroundhash,16);
			}
			/* derive the challenge bits for round i from the outputs */
			H3(y1,y2,&(as[i]), 1, &(es[i]));
		}

#pragma omp parallel for
		for(int i = 0; i<(NUM_ROUNDS); i++) {
			int verifyResult = verify(as[i], CHALLENGE, es[i], plaintext[i], zs[i]);
			if (verifyResult != 0) {
				printf("Not Verified %d\n", i);
				failed = 1;
			}
		}
	}
	if (!failed)
		/* NOTE(review): the format string has no conversion, so the extra
		   argument `i` is ignored */
		printf("verified ok \n",i);

	gettimeofday(&delta,NULL);
	/* elapsed microseconds, then converted to milliseconds */
	unsigned long inMilli = (delta.tv_sec - begin.tv_sec)*1000000 + (delta.tv_usec - begin.tv_usec);
	inMilli /= 1000;

	printf("Total time for %d loops: %ju miliseconds\n", NUM_LOOPS,(uintmax_t)inMilli);
	printf("Time for 1 loop: %ju miliseconds\n", (uintmax_t)inMilli/NUM_LOOPS);

	openmp_thread_cleanup();
	cleanup_EVP();
	return EXIT_SUCCESS;
}
|
ex_single_master.c | #include <stdio.h>
#include <omp.h>
/* Demo of the OpenMP `single` and `master` constructs inside one
 * parallel region. */
int main()
{
    int i;

#pragma omp parallel
    {
        /* exactly one thread (any thread) runs the whole loop; the others
           wait at the implicit barrier at the end of the single region */
#pragma omp single
        for (i = 0; i < 10; i++)
            printf("At single: iteration %d from thread %d\n", i, omp_get_thread_num());

        /* only thread 0 executes this; master implies no barrier */
#pragma omp master
        printf("By master: %d\n", omp_get_thread_num());
    }
    return 0;
}
|
GB_unop__tanh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tanh_fc64_fc64
// op(A') function: GB_unop_tran__tanh_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ctanh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctanh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = ctanh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = ctanh (Ax [p]) over all anz entries, in parallel.
// Two layouts are handled: Ab == NULL (all entries present) and the
// bitmap case, where Ab [p] != 0 marks the entries to process.
GrB_Info GB_unop_apply__tanh_fc64_fc64
(
    GxB_FC64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,                  // number of entries in Ax
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, no typecast: reduces to a parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctanh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ctanh (A'): transpose and apply the unary operator. The actual
// work is performed by the generic template GB_unop_transpose.c, which
// is textually included here and driven by the GB_* macros defined above.
GrB_Info GB_unop_tran__tanh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // workspace arrays (see GB_unop_transpose.c)
    const int64_t *GB_RESTRICT A_slice, // partition of A across the tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
GB_unop__identity_fc32_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_int16)
// op(A') function: GB (_unop_tran__identity_fc32_int16)
// C type: GxB_FC32_t
// A type: int16_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = (GxB_FC32_t) Ax [p] over all anz entries, in parallel:
// the identity operator combined with an int16 -> single-complex typecast
// (real part = aij, imaginary part = 0). Handles both the full layout
// (Ab == NULL) and the bitmap layout, where Ab [p] != 0 marks present entries.
GrB_Info GB (_unop_apply__identity_fc32_int16)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries in Ax
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, no typecast: reduces to a parallel memcpy
        // (not taken here: the int16 -> FC32 cast makes the flag 0)
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            int16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): the transpose, typecast, and apply are all done
// by the included template (GB_unop_transpose.c) using the macros above.
GrB_Info GB (_unop_tran__identity_fc32_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduction-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n=20, a[n],suma=0;
if(argc < 2){
fprintf(stderr,"Falta iteraciones\n");
exit(-1);
}
n = atoi(argv[1]); if(n>20) {n=20; printf("n=%d",n);}
for(i=0;i<n;i++) a[i]=i;
#pragma omp parallel for reduction(+:suma)
for (i=0;i<n;i++) suma+=a[i];
printf("Tras 'parallel' suma=%d\n",suma);
}
|
sections.c | /*
$ gcc -fopenmp -O2 src/sections.c -o bin/sections
$ export OMP_NUM_THREADS=4
$ ./bin/sections
En funcB: esta sección la ejecuta el thread 3
En funcA: esta sección la ejecuta el thread 2
La ejecuta la primera que llegue
*/
#include <stdio.h>
#include <omp.h>
/* Report which thread of the team executed this section. */
void funcA() {
int tid = omp_get_thread_num();
printf("En funcA: esta sección la ejecuta el thread %d\n", tid);
}
/* Report which thread of the team executed this section. */
void funcB() {
int tid = omp_get_thread_num();
printf("En funcB: esta sección la ejecuta el thread %d\n", tid);
}
// Run funcA and funcB as independent OpenMP sections: each section is
// executed exactly once, by whichever thread of the team picks it up.
int main(void) { // was implicit-int `main()`, invalid since C99
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
funcA(); // the (void) casts were redundant: both funcs return void
#pragma omp section
funcB();
}
}
return 0;
}
GB_binop__div_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int32)
// A*D function (colscale): GB (_AxD__div_int32)
// D*A function (rowscale): GB (_DxB__div_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int32)
// C=scalar+B GB (_bind1st__div_int32)
// C=scalar+B' GB (_bind1st_tran__div_int32)
// C=A+scalar GB (_bind2nd__div_int32)
// C=A'+scalar GB (_bind2nd_tran__div_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_SIGNED (x, y, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT32 || GxB_NO_DIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the int32 DIV op is applied by
// the included template. Returns void: this kernel cannot fail.
void GB (_Cdense_ewise3_accum__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with no accumulator, all three matrices dense.
GrB_Info GB (_Cdense_ewise3_noaccum__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C.
GrB_Info GB (_Cdense_accumB__div_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the template parallelizes over the B_ek_slicing task partition
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__div_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// a second, unreachable `return (GrB_SUCCESS)` after the inner return
// was removed; a single return now follows the block
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the result has entries present in
// either A or B, combined with the int32 DIV op where both are present.
GrB_Info GB (_AaddB__div_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B; entries in the intersection of
// the patterns of A and B.
GrB_Info GB (_AemultB_01__div_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__div_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
// bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_03__div_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__div_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p]: signed 32-bit integer division with the scalar
// bound to the first argument.
GrB_Info GB (_bind1st__div_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = GB_IDIV_SIGNED (x, bij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] / y: signed 32-bit integer division with the scalar
// bound to the second argument.
GrB_Info GB (_bind2nd__div_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = GB_IDIV_SIGNED (aij, y, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 32) ; \
}
// C = op (x, A'): transpose A and apply the op with the scalar bound first.
GrB_Info GB (_bind1st_tran__div_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 32) ; \
}
// C = op (A', y): transpose A and apply the op with the scalar bound second.
GrB_Info GB (_bind2nd_tran__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_int16
// op(A') function: GB_tran__minv_uint32_int16
// C type: uint32_t
// A type: int16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint32_t) Ax [p], 32): cast each int16
// entry to uint32 and apply the MINV unary op, in parallel.
GrB_Info GB_unop__minv_uint32_int16
(
uint32_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose, typecast, and apply; the work is done
// by the included template (phase 2 of 2).
GrB_Info GB_tran__minv_uint32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
merge_bottom_up_multithreads.c | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include"testrc.h"
#include<omp.h>
#include<sys/time.h>
data_type a[MAXN];
int n;
// array a, [begin,end)
// Bottom-up (iterative) merge sort of [begin,end), OpenMP-parallel: each
// pass merges disjoint run pairs of length `gap` into runs of 2*gap, so the
// merges within one pass are independent and can run concurrently.
void merge_sort(data_type* begin,data_type* end){
ptrdiff_t size=end-begin; // was `int`: a pointer difference can overflow int
for (ptrdiff_t gap=1;gap<size;gap<<=1){
#pragma omp parallel for
for (data_type* i=begin;i<end-gap;i+=(gap<<1)){ // loop var now loop-local
if (i<end-(gap<<1)) {
__merge(i,i+(gap<<1),i+gap); // full pair of gap-length runs
} else {
__merge(i,end,i+gap); // tail pair: second run is shorter
}
}
}
}
int main(){
scanf("%d",&n);
for (int i=0;i<n;++i) scanf(__READ_TYPE,a+i);
#ifndef __CHECK
double start,end;
start=get_time();
#endif
merge_sort(a,a+n);
#ifndef __CHECK
end=get_time();
FILE* fp=freopen("merge_bottom_up.txt","a+",stdout);
printf(" %.6lf\n",(end-start));
fclose(fp);
#endif
#ifdef __CHECK
for (int i=0;i<n;++i) printf(__PRINT_TYPE,a[i]);
#endif
return 0;
}
|
reduction.h | #ifndef __DACE_REDUCTION_H
#define __DACE_REDUCTION_H
#include <cstdint>
#include "types.h"
#include "math.h" // for ::min, ::max
#ifdef __CUDACC__
#include "../../../external/cub/cub/device/device_segmented_reduce.cuh"
#include "../../../external/cub/cub/device/device_reduce.cuh"
#include "../../../external/cub/cub/block/block_reduce.cuh"
#include "../../../external/cub/cub/iterator/counting_input_iterator.cuh"
#include "../../../external/cub/cub/iterator/transform_input_iterator.cuh"
#endif
#ifdef __HIPCC__
// HIP supports the same set of atomic ops as CUDA SM 6.0+
#define DACE_USE_GPU_ATOMICS
#define DACE_USE_GPU_DOUBLE_ATOMICS
#elif defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
#define DACE_USE_GPU_ATOMICS
#if __CUDA_ARCH__ >= 600
#define DACE_USE_GPU_DOUBLE_ATOMICS
#endif
#endif
// Specializations for reductions implemented in frameworks like OpenMP, MPI
namespace dace {
// Internal type. See below for wcr_fixed external type, which selects
// the implementation according to T's properties.
template <ReductionType REDTYPE, typename T>
struct _wcr_fixed
{
// Atomically combine *ptr with value; returns the previous *ptr.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value);
// Pure combine of two values (no memory update).
DACE_HDFI T operator()(const T &a, const T &b) const;
};
// Custom reduction with a lambda function
template <typename T>
struct wcr_custom {
template <typename WCR>
static DACE_HDFI T reduce_atomic(WCR wcr, T *ptr, const T& value) {
// The slowest kind of atomic operations (locked/compare-and-swap),
// this should only happen in case of unrecognized lambdas
T old;
#ifdef DACE_USE_GPU_ATOMICS
// Adapted from CUDA's pre-v8.0 double atomicAdd implementation
T assumed;
old = *ptr;
do {
assumed = old;
old = atomicCAS(ptr, assumed, wcr(assumed, value));
} while (assumed != old);
#else
// host fallback: serialize the read-modify-write
#pragma omp critical
{
old = *ptr;
*ptr = wcr(old, value);
}
#endif
return old;
}
// Non-conflicting version --> no critical section
template <typename WCR>
static DACE_HDFI T reduce(WCR wcr, T *ptr, const T& value) {
T old = *ptr;
*ptr = wcr(old, value);
return old;
}
};
// Specialization of CAS for float and double
template <>
struct wcr_custom<float> {
template <typename WCR>
static DACE_HDFI float reduce_atomic(WCR wcr, float *ptr, const float& value) {
// The slowest kind of atomic operations (locked/compare-and-swap),
// this should only happen in case of unrecognized lambdas
#ifdef DACE_USE_GPU_ATOMICS
// Adapted from CUDA's pre-v8.0 double atomicAdd implementation
// (CAS on the 32-bit pattern, reinterpreted via __float_as_int)
int *iptr = (int *)ptr;
int old = *iptr, assumed;
do {
assumed = old;
old = atomicCAS(iptr, assumed,
__float_as_int(wcr(__int_as_float(assumed), value)));
} while (assumed != old);
return __int_as_float(old);
#else
float old;
#pragma omp critical
{
old = *ptr;
*ptr = wcr(old, value);
}
return old;
#endif
}
// Non-conflicting version --> no critical section
template <typename WCR>
static DACE_HDFI float reduce(WCR wcr, float *ptr, const float& value) {
float old = *ptr;
*ptr = wcr(old, value);
return old;
}
};
template <>
struct wcr_custom<double> {
template <typename WCR>
static DACE_HDFI double reduce_atomic(WCR wcr, double *ptr, const double& value) {
// The slowest kind of atomic operations (locked/compare-and-swap),
// this should only happen in case of unrecognized lambdas
#ifdef DACE_USE_GPU_ATOMICS
// Adapted from CUDA's pre-v8.0 double atomicAdd implementation
unsigned long long *iptr = (unsigned long long *)ptr;
// BUGFIX: seed from *iptr (the bit pattern); the original read *ptr,
// which numerically converts the double to an integer and makes the
// first CAS compare against a bogus "assumed" value
unsigned long long old = *iptr, assumed;
do {
assumed = old;
old = atomicCAS(
iptr, assumed,
__double_as_longlong(
wcr(__longlong_as_double(assumed),
value)));
} while (assumed != old);
return __longlong_as_double(old);
#else
double old;
#pragma omp critical
{
old = *ptr;
*ptr = wcr(old, value);
}
return old;
#endif
}
// Non-conflicting version --> no critical section
template <typename WCR>
static DACE_HDFI double reduce(WCR wcr, double *ptr, const double& value) {
// BUGFIX: `old` was left uninitialized (the float and generic versions
// load *ptr first), so callers received garbage for the old value
double old = *ptr;
*ptr = wcr(old, value);
return old;
}
};
// End of specialization
template <typename T>
struct _wcr_fixed<ReductionType::Sum, T> {
// Atomically add value to *ptr; returns the previous *ptr.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicAdd(ptr, value);
#elif defined (_OPENMP) && _OPENMP >= 201107
// OpenMP 3.1+ (201107): atomic capture returns the old value
T old;
#pragma omp atomic capture
{
old = *ptr;
*ptr += value;
}
return old;
#else
// pre-3.1 OpenMP: atomic update only, old value is unavailable
#pragma omp atomic
*ptr += value;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a + b; }
};
// Implementation of double atomicAdd for CUDA architectures prior to 6.0
#if defined(DACE_USE_GPU_ATOMICS) && !defined(DACE_USE_GPU_DOUBLE_ATOMICS)
template <>
struct _wcr_fixed<ReductionType::Sum, double> {
// CAS-loop emulation of double atomicAdd (no native support < sm_60).
static DACE_HDFI double reduce_atomic(double *ptr, const double& value) {
unsigned long long int* address_as_ull = (unsigned long long int*)ptr;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(value + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
DACE_HDFI double operator()(const double &a, const double &b) const { return a + b; }
};
#endif
template <typename T>
struct _wcr_fixed<ReductionType::Product, T> {
// Atomically multiply *ptr by value; returns the previous *ptr.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
// CUDA/HIP have no atomicMul, so fall back to the CAS loop. BUGFIX:
// this called the non-atomic wcr_custom<T>::reduce, a data race when
// several threads update the same location; use reduce_atomic.
return wcr_custom<T>::reduce_atomic(
_wcr_fixed<ReductionType::Product, T>(), ptr, value);
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
#pragma omp atomic capture
{
old = *ptr;
*ptr *= value;
}
return old;
#else
#pragma omp atomic
*ptr *= value;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a * b; }
};
template <typename T>
struct _wcr_fixed<ReductionType::Min, T> {
// Atomic min: hardware atomicMin on GPU, CAS/locked fallback elsewhere.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicMin(ptr, value);
#else
return wcr_custom<T>::reduce_atomic(
_wcr_fixed<ReductionType::Min, T>(), ptr, value);
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return ::min(a, b); }
};
template <typename T>
struct _wcr_fixed<ReductionType::Max, T> {
// Atomic max: hardware atomicMax on GPU, CAS/locked fallback elsewhere.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicMax(ptr, value);
#else
return wcr_custom<T>::reduce_atomic(
_wcr_fixed<ReductionType::Max, T>(), ptr, value);
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return ::max(a, b); }
};
// Specialization for floating point types
// float/double Min and Max always use the CAS/locked custom path:
// atomicMin/atomicMax have no floating-point overloads.
template <>
struct _wcr_fixed<ReductionType::Min, float> {
static DACE_HDFI float reduce_atomic(float *ptr, const float& value) {
return wcr_custom<float>::reduce_atomic(
_wcr_fixed<ReductionType::Min, float>(), ptr, value);
}
DACE_HDFI float operator()(const float &a, const float &b) const { return ::min(a, b); }
};
template <>
struct _wcr_fixed<ReductionType::Max, float> {
static DACE_HDFI float reduce_atomic(float *ptr, const float& value) {
return wcr_custom<float>::reduce_atomic(
_wcr_fixed<ReductionType::Max, float>(), ptr, value);
}
DACE_HDFI float operator()(const float &a, const float &b) const { return ::max(a, b); }
};
template <>
struct _wcr_fixed<ReductionType::Min, double> {
static DACE_HDFI double reduce_atomic(double *ptr, const double& value) {
return wcr_custom<double>::reduce_atomic(
_wcr_fixed<ReductionType::Min, double>(), ptr, value);
}
DACE_HDFI double operator()(const double &a, const double &b) const { return ::min(a, b); }
};
template <>
struct _wcr_fixed<ReductionType::Max, double> {
static DACE_HDFI double reduce_atomic(double *ptr, const double& value) {
return wcr_custom<double>::reduce_atomic(
_wcr_fixed<ReductionType::Max, double>(), ptr, value);
}
DACE_HDFI double operator()(const double &a, const double &b) const { return ::max(a, b); }
};
// End of specialization
// Logical AND: operands are normalized to 0/1 so integer &= implements &&.
template <typename T>
struct _wcr_fixed<ReductionType::Logical_And, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicAnd(ptr, value ? T(1) : T(0));
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
T val = (value ? T(1) : T(0));
#pragma omp atomic capture
{
old = *ptr;
*ptr &= val;
}
return old;
#else
T val = (value ? T(1) : T(0));
#pragma omp atomic
*ptr &= val;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a && b; }
};
// Bitwise AND.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_And, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicAnd(ptr, value);
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
#pragma omp atomic capture
{
old = *ptr;
*ptr &= value;
}
return old;
#else
#pragma omp atomic
*ptr &= value;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a & b; }
};
// Logical OR: operands are normalized to 0/1 so integer |= implements ||.
template <typename T>
struct _wcr_fixed<ReductionType::Logical_Or, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicOr(ptr, value ? T(1) : T(0));
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
T val = (value ? T(1) : T(0));
#pragma omp atomic capture
{
old = *ptr;
*ptr |= val;
}
return old;
#else
T val = (value ? T(1) : T(0));
#pragma omp atomic
*ptr |= val;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a || b; }
};
// Bitwise OR.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_Or, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicOr(ptr, value);
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
#pragma omp atomic capture
{
old = *ptr;
*ptr |= value;
}
return old;
#else
#pragma omp atomic
*ptr |= value;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a | b; }
};
// Logical XOR: operands normalized to 0/1, combined with ^= (i.e. a != b).
template <typename T>
struct _wcr_fixed<ReductionType::Logical_Xor, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicXor(ptr, value ? T(1) : T(0));
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
T val = (value ? T(1) : T(0));
#pragma omp atomic capture
{
old = *ptr;
*ptr ^= val;
}
return old;
#else
T val = (value ? T(1) : T(0));
#pragma omp atomic
*ptr ^= val;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a != b; }
};
// Bitwise XOR.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_Xor, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicXor(ptr, value);
#elif defined (_OPENMP) && _OPENMP >= 201107
T old;
#pragma omp atomic capture
{
old = *ptr;
*ptr ^= value;
}
return old;
#else
#pragma omp atomic
*ptr ^= value;
return T(0); // Unsupported
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return a ^ b; }
};
// Exchange: store value, return the previous *ptr (no OpenMP atomic swap,
// so the host path uses a critical section).
template <typename T>
struct _wcr_fixed<ReductionType::Exchange, T> {
static DACE_HDFI T reduce_atomic(T *ptr, const T& value) {
#ifdef DACE_USE_GPU_ATOMICS
return atomicExch(ptr, value);
#else
T old;
#pragma omp critical
{
old = *ptr;
*ptr = value;
}
return old;
#endif
}
DACE_HDFI T operator()(const T &a, const T &b) const { return b; }
};
//////////////////////////////////////////////////////////////////////////
// Specialization that regresses to critical section / locked update for
// unsupported types
// SFINAE helper: defined only when T is a scalar type (see std::is_scalar).
template<typename T>
using EnableIfScalar = typename std::enable_if<std::is_scalar<T>::value>::type;
// Any vector type that is not of length 1, or struct/complex types
// do not support atomics. In these cases, we regress to locked updates.
template <ReductionType REDTYPE, typename T, typename SFINAE = void>
struct wcr_fixed
{
// Non-atomic update: combine *ptr with value, return the previous *ptr.
static DACE_HDFI T reduce(T *ptr, const T& value)
{
T old = *ptr;
*ptr = _wcr_fixed<REDTYPE, T>()(old, value);
return old;
}
// Atomic update via the locked/CAS custom path.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value)
{
return wcr_custom<T>::template reduce_atomic(
_wcr_fixed<REDTYPE, T>(), ptr, value);
}
};
// When atomics are supported, use _wcr_fixed normally
template <ReductionType REDTYPE, typename T>
struct wcr_fixed<REDTYPE, T, EnableIfScalar<T> >
{
// Non-atomic update: combine *ptr with value, return the previous *ptr.
static DACE_HDFI T reduce(T *ptr, const T& value)
{
T old = *ptr;
*ptr = _wcr_fixed<REDTYPE, T>()(old, value);
return old;
}
// Atomic update: dispatch to the per-reduction atomic implementation.
static DACE_HDFI T reduce_atomic(T *ptr, const T& value)
{
return _wcr_fixed<REDTYPE, T>::reduce_atomic(ptr, value);
}
// Pure combine of two values.
DACE_HDFI T operator()(const T &a, const T &b) const
{
return _wcr_fixed<REDTYPE, T>()(a, b);
}
};
#ifdef __CUDACC__
// Functor mapping a linear index to a strided offset (index * stride).
struct StridedIteratorHelper {
explicit StridedIteratorHelper(size_t stride)
: stride(stride) {}
size_t stride;
__host__ __device__ __forceinline__
size_t operator()(const size_t &index) const {
return index * stride;
}
};
// Builds a CUB input iterator yielding 0, stride, 2*stride, ...
inline auto stridedIterator(size_t stride) {
cub::CountingInputIterator<int> counting_iterator(0);
StridedIteratorHelper conversion_op(stride);
cub::TransformInputIterator<int, decltype(conversion_op), decltype(counting_iterator)> itr(counting_iterator, conversion_op);
return itr;
}
#endif
} // namespace dace
#endif // __DACE_REDUCTION_H
|
GB_unaryop__lnot_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int16
// op(A') function: GB_tran__lnot_fp64_int16
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = !((double) aij != 0) to each of the anz entries of Ax, writing
// the double result into Cx.  Work is split statically across nthreads
// OpenMP threads.  Returns GrB_NO_VALUE when the kernel is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_fp64_int16
(
    double *restrict Cx,        // output array, anz entries
    const int16_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = !((double) Ax [p] != 0)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry to double, and apply
// the lnot operator.  The actual transpose loop is the shared template
// GB_unaryop_transpose.c, specialized here by the GB_* macros defined above.
// Returns GrB_NO_VALUE when compiled out, GrB_SUCCESS otherwise.
GrB_Info GB_tran__lnot_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hmacSHA512_fmt_plug.c | /*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* Based on hmac-md5 by Bartavelle
*
* SIMD added Feb, 2015, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA512;
extern struct fmt_main fmt_hmacSHA384;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA512);
john_register_one(&fmt_hmacSHA384);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "base64_convert.h"
#include "formats.h"
#include "aligned.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // scaled on core i7-quad HT
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 // scaled K8-dual HT
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "HMAC-SHA512"
#define FORMAT_LABEL_384 "HMAC-SHA384"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "password is key, SHA512 " SHA512_ALGORITHM_NAME
#define ALGORITHM_NAME_384 "password is key, SHA384 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 128
#define PAD_SIZE_W (PAD_SIZE/8)
#define BINARY_SIZE (512/8)
#define BINARY_SIZE_384 (384/8)
#define BINARY_ALIGN 8
#ifndef SIMD_COEF_64
#define SALT_LENGTH 1023
#define SALT_ALIGN 1
#else
#define SALT_LIMBS 2 /* 2 limbs, 239 bytes */
#define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 17)
#define SALT_ALIGN MEM_ALIGN_SIMD
#endif
#define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + BINARY_SIZE * 2)
#define CIPHERTEXT_LENGTH_384 (SALT_LENGTH + 1 + BINARY_SIZE_384 * 2)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i&127)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i&127)&7)) + index/SIMD_COEF_64 * PAD_SIZE * SIMD_COEF_64 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"what do ya want for nothing?#164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737", "Jefe"},
{"Reference hashes are keys to success#73a5eff716d0147a440fdf5aff187c52deab8c4dc55073be3d5742e788a99fd6b53a5894725f0f88f3486b5bb63d2af930a0cf6267af572128273daf8eee4cfa", "The magnum"},
{"Beppe#Grillo#AB08C46822313481D548412A084F08C7CA3BBF8A98D901D14698759F4C36ADB07528348D56CAF4F6AF654E14FC102FF10DCF50794A82544426386C7BE238CEAF", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
{"hjXNSoAhq2YLm2vSFtc7BCJNUS9RNPcl#1c10f4d7237b034f27e7af81705e6cb0acecac395086e81e55a391a12b60b49e375b2de39c94f4989a50604754ffeea0b379ae1d4cc6b3550cd0a24a582ef351", "1"},
{"JkbHdY2Biype3gv2TpG2Wnv68OF7p6cl#a1f6e131e2fe1f728c5f2b8d0d8af9a6e202868ab9abef0e8f9126a712a4ae7f10533bbdedb710f6a521302c48a743caab1715aa85c4a57fbd51fde5e07945d9", "22"},
{"X4eOvWZw1b9L1NiND4vQxutubtrGhzNe#5a6002cedb05b97ce13393acab09767005a611dfc3e306305772c614ff4869077b3080f23694d3efc6d1998b4514fe8316389edb5f61dbcea8bd3b4d01595ae1", "333"},
{"VYG7HeRZLyie5jdzDRaqfd0yYX8PFstX#dd2b8b8a97c56af68fef5e73bf1eceec0c951084f97b66196b32758ed8b34a8d2f0e10663acac662e393fd42c0043e4cedf0d3c617ed43ba61b0297353fc2e2a", "4444"},
{"x8nIFPPTMJMEZLMSELpEub6bQjQzyjkq#fb92efe7d0abff004c8dc94c64356536df65dd42c323da1de4c583c255135b1a15002efc0b794683e7ac4ea7e7ae3813fb132b43c86a6951059a1574908987fb", "55555"},
{"Hr8KfafSSsEJfp5HZRLVAGQFrEPTDiSi#752e874177fc0f31149ebc699c32b2f7f600ad4d28f1fc27eb715a328100e6e67ff2845b20acd9ebc4befc7a629f1bd9a5b96abf981dcaba71317dcbb8cfdfba", "666666"},
{"UH0LvhZUihMMECAW0Ummw2OSgAOzV0i9#de3d4986007b1f45542f1d38d294ac69a0e23e2985103082a6ee134d4c786cfcb61d90be72388280e119e047bab32e68c6615d45d21895e5b8ef2b7eaf7258fd", "7777777"},
{"hX4OqAvhCjwEPwsi9I7SlIQbmlDb6LDh#cbf4fbb0721c9ec00af347d78046c314087efcbce47ef732e119433dc6f7fe3d2788e0a20d76bd2b1f9b199c9914eeaee0a51a2fb88cfbb7472b538e45b53711", "88888888"},
{"gOONPyTnQVKWMvh61x8Y1JGlDalKCBAE#9d4d34c76cb2a4cbecb8929be61dd4af5088a055bd338cd245311786c4119a5b526b72646626fff1cb4931eb0fe05d8a7648a66f0db1f2522b8af1cfc2ac8e74", "999999999"},
{"F3WBOJKUyVWbnqtGZ2ur8uW0nqIBpObK#6043dd6dd3dd96699db8351b0db762af27a5db06169ec6668e9f464fcc3fdf1d7deafaccb67e5ef7f5ee96b2a5efad33a8af20eb19fe60d8b20e7994c76a0610", "0000000000"},
{"pfZzfOSVpQvuILYEIAeCT8Xnj7eQnR2w#ff80da7bbcdb11fd8bb282a80603ed34847d897701fd547d06f4438072ecd43058a3b7c0b3a296f7c5dbbf06beb3825d1eb7122f01ad78ef2afc5ab09c46ca45", "11111111111"},
/* mockup JWT hash */
{"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.r7FDU+ahrbW0Wtsekh5UNqV2iyXGrQQaRZjdc8i733QIoTSIQM//FSGjP151C2ijvNUVo5syWOW+RpZc7khU1g", "magnum"},
{NULL}
};
static struct fmt_tests tests_384[] = {
{"what do ya want for nothing?#af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec3736322445e8e2240ca5e69e2c78b3239ecfab21649", "Jefe"},
{"Beppe#Grillo#8361922C63506E53714F8A8491C6621A76CF0FD6DFEAD91BF59B420A23DFF2745C0A0D5E142D4F937E714EA8C228835B", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
/* mockup JWT hash */
{"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.WNzjJCdDCTV3hLfsRy//hny9VzlaZXHFvoKSJXB5/rbKkXwE1Jve/DUirW7r5ztm", "magnum"},
{NULL}
};
#ifdef SIMD_COEF_64
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
typedef struct cur_salt_t {
unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static SHA512_CTX *ipad_ctx;
static SHA512_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;
#ifdef SIMD_COEF_64
/* Reset the interleaved SIMD ipad/opad buffers to the HMAC pad constants
 * (0x36 / 0x5c) for all lanes; set_key() then XORs key bytes on top. */
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif
/*
 * Shared initialization for both formats.  B_LEN is the digest length in
 * bytes (BINARY_SIZE for SHA-512, BINARY_SIZE_384 for SHA-384).
 */
static void init(struct fmt_main *self, const int B_LEN)
{
#ifdef SIMD_COEF_64
	int i;
#endif
#ifdef _OPENMP
	/* Scale the key batch size by thread count (and OMP_SCALE). */
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	/* Pre-fill the constant parts of the final (outer) digest block in
	 * every SIMD lane: the 0x80 padding byte right after the digest, and
	 * the message bit length = (B_LEN + PAD_SIZE) * 8. */
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		crypt_key[GETPOS(B_LEN, i)] = 0x80;
		((uint64_t*)crypt_key)[15 * SIMD_COEF_64 + (i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (B_LEN + PAD_SIZE) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
	ipad = mem_calloc(sizeof(*ipad), self->params.max_keys_per_crypt);
	opad = mem_calloc(sizeof(*opad), self->params.max_keys_per_crypt);
	ipad_ctx = mem_calloc_align(self->params.max_keys_per_crypt,
	                            sizeof(*opad_ctx), 8);
	opad_ctx = mem_calloc_align(self->params.max_keys_per_crypt,
	                            sizeof(*opad_ctx), 8);
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
}

/* fmt_main init callbacks: bind the digest size for each variant. */
static void init_512(struct fmt_main *self) {
	init(self, BINARY_SIZE);
}

static void init_384(struct fmt_main *self) {
	init(self, BINARY_SIZE_384);
}
/* Free everything allocated in init() (shared by both variants). */
static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_64
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}
/*
 * Canonicalize a ciphertext: convert 2-dot JWT inputs into the normal
 * "salt#hexdigest" form and lower-case the hex digest.  Returns the input
 * pointer unchanged when no conversion applies.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	static char out[CIPHERTEXT_LENGTH + 1];
	char *hash;

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;
	if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') &&
	    strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		// Treat this like a JWT hash. Convert into 'normal' hmac-sha512 format.
		char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi;

		strnzcpy(tmp, ciphertext, sizeof(tmp));
		cpi = strchr(tmp, '.');
		cpi = strchr(&cpi[1], '.');
		if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN)
			return ciphertext;
		*cpi++ = 0;
		memset(buf, 0, sizeof(buf));
		base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex,
		               sizeof(buf), flg_Base64_NO_FLAGS, 0);
		if (strlen(buf) != B_LEN * 2)
			return ciphertext;
		sprintf(out, "%s#%s", tmp, buf);
	} else
		strnzcpy(out, ciphertext, sizeof(out));
	/* Lower-case the digest part.  Guard against a missing '#': the
	 * original passed strrchr()'s result straight to strlwr(), crashing
	 * on any input without '#' that is not a 2-dot JWT (split() may be
	 * reached before valid() has vetted the string). */
	hash = strrchr(out, '#');
	if (hash)
		strlwr(hash);
	return out;
}
/* fmt_main split callbacks: bind digest/ciphertext sizes per variant. */
static char *split_512(char *ciphertext, int index, struct fmt_main *self) {
	return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static char *split_384(char *ciphertext, int index, struct fmt_main *self) {
	return split(ciphertext, index, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384);
}
/*
 * Validate a "salt#hexdigest" candidate (or a 2-dot JWT, which is first
 * canonicalized via split()).  Accepts iff the salt fits, and exactly
 * B_LEN*2 hex digits follow the last '#'.
 */
static int valid(char *ciphertext, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	char *hash_part = strrchr(ciphertext, '#');	/* allow # in salt */
	int salt_len, idx, digits;

	if (!hash_part) {
		/* No '#': could be a JWT (two or more dots). */
		char *first_dot = strchr(ciphertext, '.');

		if (first_dot && first_dot != strrchr(ciphertext, '.')) {
			if (strlen(ciphertext) > CT_LEN)
				return 0;
			ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN);
			hash_part = strrchr(ciphertext, '#');
		}
	}
	if (!hash_part || hash_part > &ciphertext[strlen(ciphertext) - 1])
		return 0;
	salt_len = (int)(hash_part - ciphertext);
	if (salt_len > SALT_LENGTH)
		return 0;
	digits = B_LEN * 2;
	if (strlen(hash_part + 1) != digits)
		return 0;
	/* Every digest character must be a hex digit. */
	for (idx = salt_len + 1; idx < salt_len + 1 + digits; idx++) {
		char c = ciphertext[idx];

		if (!((('0' <= c) && (c <= '9')) ||
		      (('a' <= c) && (c <= 'f')) ||
		      (('A' <= c) && (c <= 'F'))))
			return 0;
	}
	return 1;
}
/* fmt_main valid callbacks: bind digest/ciphertext sizes per variant. */
static int valid_512(char *ciphertext, struct fmt_main *self) {
	return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static int valid_384(char *ciphertext, struct fmt_main *self) {
	return valid(ciphertext, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384);
}
/* Install the active salt, as produced by get_salt(). */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_64
	cur_salt = salt;	/* pre-interleaved, pre-padded SIMD limb buffers */
#else
	strcpy((char*)cur_salt, (char*)salt);	/* plain NUL-terminated string */
#endif
}
/*
 * Load one candidate key: cache the plaintext in saved_plain[] and XOR the
 * key bytes into the 0x36/0x5c pad buffers (standard HMAC key mixing).
 * Keys longer than one block (PAD_SIZE) are first replaced by their
 * SHA-512/384 digest.  B_LEN selects the variant.
 */
static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN)
{
	int len;
#ifdef SIMD_COEF_64
	/* Pointers into this key's interleaved lane within ipad/opad.
	 * GETPOS(7, index) is the start of lane 'index' (bytes are stored
	 * big-endian-swapped per 64-bit word). */
	uint64_t *ipadp = (uint64_t*)&ipad[GETPOS(7, index)];
	uint64_t *opadp = (uint64_t*)&opad[GETPOS(7, index)];
	const uint64_t *keyp = (uint64_t*)key;
	uint64_t temp;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	if (len > PAD_SIZE) {
		/* Over-long key: digest it first, then XOR the digest in. */
		unsigned char k0[BINARY_SIZE];
		SHA512_CTX ctx;
		int i;

		if (B_LEN == BINARY_SIZE) {
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, key, len);
			SHA512_Final(k0, &ctx);
		} else {
			SHA384_Init(&ctx);
			SHA384_Update(&ctx, key, len);
			SHA384_Final(k0, &ctx);
		}
		keyp = (uint64_t*)k0;
		for (i = 0; i < B_LEN / 8; i++, ipadp += SIMD_COEF_64, opadp += SIMD_COEF_64)
		{
			temp = JOHNSWAP64(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
	/* Short key: walk it 8 bytes at a time (byte-swapped so the key's
	 * first byte is the most significant), XOR into both pads, and stop
	 * at the terminating NUL.  The partial-word cases below XOR only the
	 * upper halfword/word that still holds key bytes, so pad bytes past
	 * the NUL keep their 0x36/0x5c values. */
	while(((temp = JOHNSWAP64(*keyp++)) & 0xff00000000000000ULL)) {
		/* NUL in byte 1 or 2: only the top 16 bits carry key data. */
		if (!(temp & 0x00ff000000000000ULL) || !(temp & 0x0000ff0000000000ULL))
		{
			((unsigned short*)ipadp)[3] ^=
				(unsigned short)(temp >> 48);
			((unsigned short*)opadp)[3] ^=
				(unsigned short)(temp >> 48);
			break;
		}
		/* NUL in byte 3 or 4: XOR the top 32 bits. */
		if (!(temp & 0x00ff00000000ULL) || !(temp & 0x0000ff000000ULL))
		{
			((uint32_t*)ipadp)[1] ^=
				(uint32_t)(temp >> 32);
			((uint32_t*)opadp)[1] ^=
				(uint32_t)(temp >> 32);
			break;
		}
		/* NUL in byte 5 or 6: XOR the top 32 bits plus the next 16. */
		if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
		{
			((uint32_t*)ipadp)[1] ^=
				(uint32_t)(temp >> 32);
			((uint32_t*)opadp)[1] ^=
				(uint32_t)(temp >> 32);
			((unsigned short*)ipadp)[1] ^=
				(unsigned short)(temp >> 16);
			((unsigned short*)opadp)[1] ^=
				(unsigned short)(temp >> 16);
			break;
		}
		/* Full 8 key bytes in this word. */
		*ipadp ^= temp;
		*opadp ^= temp;
		if (!(temp & 0xff))
			break;	/* NUL was the last byte of the word */
		ipadp += SIMD_COEF_64;
		opadp += SIMD_COEF_64;
	}
#else
	int i;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	/* Rebuild the pads from scratch, then XOR in the (possibly
	 * pre-digested) key bytes. */
	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);

	if (len > PAD_SIZE) {
		SHA512_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		if (B_LEN == BINARY_SIZE) {
			SHA512_Init( &ctx );
			SHA512_Update( &ctx, key, len);
			SHA512_Final( k0, &ctx);
		} else {
			SHA384_Init( &ctx );
			SHA384_Update( &ctx, key, len);
			SHA384_Final( k0, &ctx);
		}
		len = B_LEN;
		for (i=0;i<len;i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
	for (i=0;i<len;i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
#endif
	new_keys = 1;	/* tell crypt_all to refresh the cached pad digests */
}

/* fmt_main set_key callbacks, binding the digest size per variant. */
static void set_key_512(char *key, int index) {
	set_key(key, index, BINARY_SIZE);
}

static void set_key_384(char *key, int index) {
	set_key(key, index, BINARY_SIZE_384);
}
/* Return the plaintext cached by set_key() for this index. */
static char *get_key(int index)
{
	return saved_plain[index];
}
/* Fast screen: does any computed digest's first word match the candidate? */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_64
	unsigned int index;

	for (index = 0; index < count; index++) {
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64)
		if (((uint64_t*)binary)[0] == ((uint64_t*)crypt_key)[(index&(SIMD_COEF_64-1))+index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64])
			return 1;
	}
	return 0;
#else
	int index = 0;

#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	for (; index < count; index++)
#endif
	if (((uint32_t*)binary)[0] == crypt_key[index][0])
		return 1;
	return 0;
#endif
}
/* Full B_LEN-byte digest comparison for one candidate index. */
static int cmp_one(void *binary, int index, int B_LEN)
{
#ifdef SIMD_COEF_64
	int i;

	for (i = 0; i < (B_LEN/8); i++)
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64)
		if (((uint64_t*)binary)[i] != ((uint64_t*)crypt_key)[i * SIMD_COEF_64 + (index & (SIMD_COEF_64-1)) + (index/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], B_LEN);
#endif
}

static int cmp_one_512(void *binary, int index) {
	return cmp_one(binary, index, BINARY_SIZE);
}

static int cmp_one_384(void *binary, int index) {
	return cmp_one(binary, index, BINARY_SIZE_384);
}

/* cmp_one already compared the full digest; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return (1);
}
/*
 * Compute HMAC(key, salt) for all 'count' queued keys.
 * SIMD build: EX_FLAGS is 0 for SHA-512, SSEi_CRYPT_SHA384 for SHA-384.
 * Scalar build: B_LEN is the digest size in bytes.
 */
static int crypt_all(int *pcount, struct db_salt *salt,
#ifdef SIMD_COEF_64
                     const unsigned EX_FLAGS
#else
                     const int B_LEN
#endif
	)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		unsigned int i;

		/* Keys changed since the last crypt: refresh the cached
		 * mid-state digests of (key ^ ipad) and (key ^ opad). */
		if (new_keys) {
			SIMDSHA512body(&ipad[index * PAD_SIZE],
			               (uint64_t*)&prep_ipad[index * BINARY_SIZE],
			               NULL, SSEi_MIXED_IN|EX_FLAGS);
			SIMDSHA512body(&opad[index * PAD_SIZE],
			               (uint64_t*)&prep_opad[index * BINARY_SIZE],
			               NULL, SSEi_MIXED_IN|EX_FLAGS);
		}
		/* Inner hash: continue from prep_ipad over the salt limbs. */
		SIMDSHA512body(cur_salt->salt[0],
		               (uint64_t*)&crypt_key[index * PAD_SIZE],
		               (uint64_t*)&prep_ipad[index * BINARY_SIZE],
		               SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		for (i = 1; i <= (cur_salt->salt_len + 16) / PAD_SIZE; i++)
			SIMDSHA512body(cur_salt->salt[i],
			               (uint64_t*)&crypt_key[index * PAD_SIZE],
			               (uint64_t*)&crypt_key[index * PAD_SIZE],
			               SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		if (EX_FLAGS) {
			// NOTE, SSESHA384 will output 64 bytes. We need the first 48 (plus the 0x80 padding).
			// so we are forced to 'clean' this crap up, before using the crypt as the input.
			uint64_t *pclear = (uint64_t*)&crypt_key[index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64*8];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
				pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64] = 0x8000000000000000ULL;
				pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64+SIMD_COEF_64] = 0;
			}
		}
		/* Outer hash: continue from prep_opad over the inner digest
		 * (its 0x80 pad and length word were pre-set in init()). */
		SIMDSHA512body(&crypt_key[index * PAD_SIZE],
		               (uint64_t*)&crypt_key[index * PAD_SIZE],
		               (uint64_t*)&prep_opad[index * BINARY_SIZE],
		               SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
#else
		SHA512_CTX ctx;

		// Note, for oSSL, we really only need SHA512_Init and SHA384_Init. From that point
		// on, SHA512_Update/SHA512_Final can be used. Also, jtr internal sha2.c file works
		// like that. BUT I am not sure every hash engine works that way, so we are keeping
		// the 'full' block.
		if (B_LEN == BINARY_SIZE) {
			if (new_keys) {
				SHA512_Init(&ipad_ctx[index]);
				SHA512_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA512_Init(&opad_ctx[index]);
				SHA512_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			/* digest = H(opad_state, H(ipad_state, salt)) */
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA512_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA512_Update( &ctx, crypt_key[index], B_LEN);
			SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
		} else {
			if (new_keys) {
				SHA384_Init(&ipad_ctx[index]);
				SHA384_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA384_Init(&opad_ctx[index]);
				SHA384_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA384_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA384_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA384_Update( &ctx, crypt_key[index], B_LEN);
			SHA384_Final( (unsigned char*) crypt_key[index], &ctx);
		}
#endif
	}
	new_keys = 0;
	return count;
}

/* fmt_main crypt callbacks for each variant. */
static int crypt_all_512(int *pcount, struct db_salt *salt) {
#ifdef SIMD_COEF_64
	return crypt_all(pcount, salt, 0);
#else
	return crypt_all(pcount, salt, BINARY_SIZE);
#endif
}

static int crypt_all_384(int *pcount, struct db_salt *salt) {
#ifdef SIMD_COEF_64
	return crypt_all(pcount, salt, SSEi_CRYPT_SHA384);
#else
	return crypt_all(pcount, salt, BINARY_SIZE_384);
#endif
}
/*
 * Decode the hex digest after the last '#' into a static buffer.  For SIMD
 * builds the 64-bit words are byte-swapped to match crypt_key's layout.
 */
static void *get_binary(char *ciphertext, const int B_LEN)
{
	JTR_ALIGN(BINARY_ALIGN) static unsigned char realcipher[BINARY_SIZE];
	int i,pos;

	/* Scan backwards from the terminating NUL so '#' may appear in the salt. */
	for (i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt
	pos=i+1;
	for (i=0;i<B_LEN;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])];
#ifdef SIMD_COEF_64
	alter_endianity_w64(realcipher, B_LEN/8);
#endif
	return (void*)realcipher;
}

static void *get_binary_512(char *ciphertext) {
	return get_binary(ciphertext, BINARY_SIZE);
}

static void *get_binary_384(char *ciphertext) {
	return get_binary(ciphertext, BINARY_SIZE_384);
}
/*
 * Extract the salt (everything before the last '#').  Returns a static
 * buffer (not reentrant): a plain NUL-terminated string for the scalar
 * build, or a cur_salt_t with the salt replicated into every SIMD lane,
 * 0x80-terminated and with the SHA-512 message bit length
 * ((salt_len + PAD_SIZE) * 8, key block included) pre-filled.
 */
static void *get_salt(char *ciphertext)
{
	static unsigned char salt[SALT_LENGTH+1];
	int len;
#ifdef SIMD_COEF_64
	unsigned int i = 0;
	static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt;
	int salt_len = 0;
#endif
	// allow # in salt
	len = strrchr(ciphertext, '#') - ciphertext;
	memset(salt, 0, sizeof(salt));
	memcpy(salt, ciphertext, len);
#ifdef SIMD_COEF_64
	memset(&cur_salt, 0, sizeof(cur_salt));
	/* Interleave each salt byte into every lane of the limb buffers. */
	while(((unsigned char*)salt)[salt_len])
	{
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] =
				((unsigned char*)salt)[salt_len];
		++salt_len;
	}
	cur_salt.salt_len = salt_len;
	/* Append SHA-512 padding: 0x80 marker plus the final length word. */
	for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
		cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80;
		((uint64_t*)cur_salt.salt[(salt_len+16) / PAD_SIZE])[15 * SIMD_COEF_64 + (i & (SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (salt_len + PAD_SIZE) << 3;
	}
	return &cur_salt;
#else
	return salt;
#endif
}
struct fmt_main fmt_hmacSHA512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests
}, {
init_512,
done,
fmt_default_reset,
fmt_default_prepare,
valid_512,
split_512,
get_binary_512,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key_512,
get_key,
#ifdef SIMD_COEF_64
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all_512,
{
fmt_default_get_hash
},
cmp_all,
cmp_one_512,
cmp_exact
}
};
struct fmt_main fmt_hmacSHA384 = {
{
FORMAT_LABEL_384,
FORMAT_NAME,
ALGORITHM_NAME_384,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE_384,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests_384
}, {
init_384,
done,
fmt_default_reset,
fmt_default_prepare,
valid_384,
split_384,
get_binary_384,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key_384,
get_key,
#ifdef SIMD_COEF_64
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all_384,
{
fmt_default_get_hash
},
cmp_all,
cmp_one_384,
cmp_exact
}
};
#endif /* plugin stanza */
|
dmul_eq_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include "sptensor.h"
/**
* Openmp parallelized Element wise multiply two sparse tensors, with exactly the same nonzero
* distribution.
* @param[out] Z the result of X*Y, should be uninitialized
* @param[in] X the input X
* @param[in] Y the input Y
*/
int ptiOmpSparseTensorDotMulEq(ptiSparseTensor *Z, ptiSparseTensor * const X, ptiSparseTensor * const Y)
{
	ptiNnzIndex i;

	/* Ensure X and Y are in same shape */
	if(Y->nmodes != X->nmodes) {
		pti_CheckError(PTIERR_SHAPE_MISMATCH, "SpTns DotMul", "shape mismatch");
	}
	for(i = 0; i < X->nmodes; ++i) {
		if(Y->ndims[i] != X->ndims[i]) {
			pti_CheckError(PTIERR_SHAPE_MISMATCH, "SpTns DotMul", "shape mismatch");
		}
	}
	/* Ensure X and Y have exactly the same nonzero distribution */
	if(Y->nnz != X->nnz) {
		pti_CheckError(PTIERR_SHAPE_MISMATCH, "SpTns DotMul", "nonzero distribution mismatch");
	}
	ptiNnzIndex nnz = X->nnz;

	/* Z starts as a deep copy of X; only its values are overwritten below. */
	ptiCopySparseTensor(Z, X, 1);

	ptiTimer timer;
	ptiNewTimer(&timer, 0);
	ptiStartTimer(timer);

	/* The value arrays are index-aligned (identical nonzero pattern), so
	 * the element-wise product is embarrassingly parallel. */
#pragma omp parallel for
	for(i=0; i< nnz; ++i)
		Z->values.data[i] = X->values.data[i] * Y->values.data[i];

	ptiStopTimer(timer);
	ptiPrintElapsedTime(timer, "OMP SpTns DotMul");
	ptiFreeTimer(timer);

	/* Check whether elements become zero after multiplying.
	   If so, fill the gap with the [nnz-1]'th element. */
	pti_SparseTensorCollectZeros(Z);
	/* Sort the indices */
	ptiSparseTensorSortIndex(Z, 1, 1);
	return 0;
}
|
par_gsmg.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Geometrically smooth interpolation multigrid
*
*****************************************************************************/
#include <stdio.h>
#include <math.h>
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"
#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
/* Euclidean (2-) norm of the n-vector x: sqrt(sum_i x[i]^2). */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real sum_sq = 0.;
   HYPRE_Int idx;

   for (idx = 0; idx < n; idx++)
   {
      sum_sq += x[idx] * x[idx];
   }
   return sqrt(sum_sq);
}
/* Scale the n-vector x in place by the scalar a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int idx;

   for (idx = 0; idx < n; idx++)
   {
      x[idx] *= a;
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFillSmooth
* - fill in smooth matrix
* - this function will scale the smooth vectors
*--------------------------------------------------------------------------*/
/*
 * Fill the strength matrix S from the smooth sample vectors.  Each kept
 * entry S(i,ii) becomes 1 / sum_k |p_k[i] - p_k[ii]| over the (normalized)
 * sample vectors; entries between different functions, or where A holds an
 * explicit zero, are set to 0.  The sample vectors are scaled in place.
 */
HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func)
{
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;          /* walks the sample vectors, one per pass */
   HYPRE_Real *p_offd;     /* off-processor copies of the samples */
   HYPRE_Real *p_ptr;      /* retained head of p_offd for indexing/free */
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif
   /* normalize each sample vector and divide by number of samples */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                        num_sends), HYPRE_MEMORY_HOST);
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;
   p = samples;
   /* Exchange the sample values for the off-processor columns, one sample
    * vector at a time, using A's communication package. */
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++]
               = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
                                                  p_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   /* For systems of PDEs, also exchange the function labels so off-diag
    * couplings between unlike functions can be dropped. */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < n; i++)
   {
      /* Diag part: the row's first entry is skipped (S_diag_i[i]+1 —
       * presumably the diagonal; NOTE(review) confirm against how S is
       * built).  A_diag_data is indexed with S's j, which assumes S and A
       * share the same sparsity pattern/ordering here. */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];
         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }
         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }
         /* strength = 1 / sum_k |p_k[i] - p_k[ii]| */
         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }
         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }
         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }
      /* Off-diag part: same recipe, but the neighbor values come from the
       * exchanged p_offd copies. */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];
         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }
         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }
         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }
         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }
         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif
   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixChooseThresh
*--------------------------------------------------------------------------*/
/* Compute a candidate drop threshold for S: for every local row take the
 * largest entry over the diag and offd parts, then return the smallest
 * such nonzero row-maximum over all processors.  Rows whose entries are
 * all zero are excluded from the minimum. */
HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm         comm        = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag      = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd      = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_diag_i    = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_offd_i    = hypre_CSRMatrixI(S_offd);
   HYPRE_Real      *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real      *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int        num_rows    = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int        row, k;
   HYPRE_Real       row_max;
   HYPRE_Real       local_min = 1.e+10;  /* large sentinel */
   HYPRE_Real       global_min;

   for (row = 0; row < num_rows; row++)
   {
      row_max = 0.;
      for (k = S_diag_i[row]; k < S_diag_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_diag_data[k]);
      }
      for (k = S_offd_i[row]; k < S_offd_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_offd_data[k]);
      }
      /* empty/all-zero rows do not participate in the minimum */
      if (row_max != 0.)
      {
         local_min = hypre_min(local_min, row_max);
      }
   }

   /* global minimum of the per-processor minima */
   hypre_MPI_Allreduce(&local_min, &global_min, 1, HYPRE_MPI_REAL,
                       hypre_MPI_MIN, comm);

   return global_min;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixThreshold
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nonzeros_diag = A_diag_i[n];
HYPRE_Int num_nonzeros_offd = A_offd_i[n];
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
HYPRE_Int *S_offd_i;
HYPRE_Int *S_offd_j;
HYPRE_Real *S_offd_data;
HYPRE_Int count, i, jS, jA;
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_diag; i++)
if (A_diag_data[i] >= thresh)
count++;
/* allocate vectors */
S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= thresh)
{
S_diag_data[jS] = A_diag_data[jA];
S_diag_j[jS] = A_diag_j[jA];
jS++;
}
}
}
S_diag_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_diag) = jS;
/* free the vectors we don't need */
hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_diag) = S_diag_i;
hypre_CSRMatrixJ(A_diag) = S_diag_j;
hypre_CSRMatrixData(A_diag) = S_diag_data;
/*
* Offd part
*/
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_offd; i++)
if (A_offd_data[i] >= thresh)
count++;
/* allocate vectors */
S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= thresh)
{
S_offd_data[jS] = A_offd_data[jA];
S_offd_j[jS] = A_offd_j[jA];
jS++;
}
}
}
S_offd_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_offd) = jS;
/* free the vectors we don't need */
hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_offd) = S_offd_i;
hypre_CSRMatrixJ(A_offd) = S_offd_j;
hypre_CSRMatrixData(A_offd) = S_offd_data;
return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothVecs
* - smoother depends on the level being used
*--------------------------------------------------------------------------*/
/* Generate nsamples smooth vectors for A by starting from random vectors
 * and applying num_sweeps sweeps of a smoother to the homogeneous system
 * A u = 0.  The vectors are returned concatenated in *SmoothVecs_p
 * (nsamples blocks of n_local entries each, host memory, caller frees).
 *
 * data         : hypre_ParAMGData*, supplies nsamples, relax/smoother
 *                settings and the debug flag
 * A            : matrix to smooth against
 * num_sweeps   : sweeps per sample (overridden by the AMG smoother
 *                setting when smooth_num_levels > level)
 * level        : current AMG level, selects whether the complex smoother
 *                (e.g. Schwarz) is active
 * SmoothVecs_p : output, newly allocated array of the smooth vectors
 *
 * Always returns 0. */
HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
hypre_ParCSRMatrix *A,
HYPRE_Int num_sweeps,
HYPRE_Int level,
HYPRE_Real **SmoothVecs_p)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_ParVector *Zero;   /* zero right-hand side */
hypre_ParVector *Temp;   /* relaxation workspace */
hypre_ParVector *U;      /* current iterate (random start) */
hypre_ParVector *Qtemp = NULL;  /* extra workspace, threaded relaxation only */
HYPRE_Int i;
HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int sample;
HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
HYPRE_Int ret;
HYPRE_Real *datax, *bp, *p;
HYPRE_Int rlx_type;
HYPRE_Int smooth_type;
HYPRE_Int smooth_option = 0;
HYPRE_Int smooth_num_levels;
HYPRE_Solver *smoother;
HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
HYPRE_Int num_threads;
num_threads = hypre_NumThreads();
/* make sure A has a communication package before relaxing */
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (debug_flag >= 1)
hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps,
nsamples);
smooth_type = hypre_ParAMGDataSmoothType(amg_data);
smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
/* when a complex smoother is configured for this level, it overrides the
 * basic relaxation and the sweep count; smoother is only read when
 * smooth_option == 6, which can only be set here, so it is never used
 * uninitialized */
if (smooth_num_levels > level)
{
smooth_option = smooth_type;
smoother = hypre_ParAMGDataSmoother(amg_data);
num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
}
rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
/* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
/* omega = hypre_ParAMGDataOmega(amg_data)[level]; */
/* generate par vectors */
Zero = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorInitialize(Zero);
datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
for (i=0; i<n_local; i++)
datax[i] = 0.;
Temp = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorInitialize(Temp);
datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
for (i=0; i<n_local; i++)
datax[i] = 0.;
U = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorInitialize(U);
/* datax now aliases U's local data for the rest of the routine */
datax = hypre_VectorData(hypre_ParVectorLocalVector(U));
if (num_threads > 1)
{
Qtemp = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorInitialize(Qtemp);
}
/* allocate space for the vectors */
bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
p = bp;
/* generate random vectors */
for (sample=0; sample<nsamples; sample++)
{
/* random initial guess in [-0.5, 0.5) written into U */
for (i=0; i<n_local; i++)
datax[i] = hypre_Rand() - .5;
for (i=0; i<num_sweeps; i++)
{
if (smooth_option == 6)
{
HYPRE_SchwarzSolve(smoother[level],
(HYPRE_ParCSRMatrix) A,
(HYPRE_ParVector) Zero,
(HYPRE_ParVector) U);
}
else
{
/* relax on A u = 0 over all points with weight/omega 1.0 */
ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
1.0 /*omega*/, NULL, U, Temp,
Qtemp);
hypre_assert(ret == 0);
}
}
/* copy out the solution */
for (i=0; i<n_local; i++)
*p++ = datax[i];
}
hypre_ParVectorDestroy(Zero);
hypre_ParVectorDestroy(Temp);
hypre_ParVectorDestroy(U);
if (num_threads > 1)
hypre_ParVectorDestroy(Qtemp);
/* ownership of bp transfers to the caller */
*SmoothVecs_p = bp;
return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothDirs replaces CreateS in AMG
* - smoother depends on the level being used
* - in this version, CreateSmoothVecs must be called prior to this function
*--------------------------------------------------------------------------*/
/* Build the strength matrix S used by GSMG from previously computed
 * smooth vectors (see hypre_BoomerAMGCreateSmoothVecs).  S is a clone of
 * A's sparsity pattern whose values measure how slowly the smooth
 * vectors vary along each connection; entries below thresh times the
 * global minimax value are dropped.  Caller owns *S_ptr. */
HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData   *amg_data   = (hypre_ParAMGData *) data;
   HYPRE_Int           debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Real          minimax;
   hypre_ParCSRMatrix *S;

   /* structural copy of A (values not copied) */
   S = hypre_ParCSRMatrixClone(A, 0);

   /* fill S with smooth-vector difference measures */
   hypre_ParCSRMatrixFillSmooth(hypre_ParAMGDataNumSamples(amg_data),
                                SmoothVecs, S, A, num_functions, dof_func);

   /* smallest nonzero per-row maximum across all processors */
   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
   {
      hypre_printf("Minimax chosen: %f\n", minimax);
   }

   /* drop weak entries and compress the storage */
   hypre_ParCSRMatrixThreshold(S, thresh * minimax);

   *S_ptr = S;
   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGNormalizeVecs
*
* Normalize the smooth vectors and also make the first vector the constant
* vector
*
* inputs:
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
*
* output:
* V = adjusted smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int  i, j;
   HYPRE_Real nrm;

   /* change first vector to the constant vector */
   for (i = 0; i < n; i++)
   {
      V[i] = 1.0;
   }

   /* scale every vector to unit 2-norm; skip a zero-norm vector instead
    * of dividing by zero (possible only for degenerate input, e.g. a
    * smoothing sweep that produced an exactly zero vector) */
   for (j = 0; j < num; j++)
   {
      nrm = mydnrm2(n, &V[j*n]);
      if (nrm != 0.0)
      {
         mydscal(n, 1./nrm, &V[j*n]);
      }
   }

   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGFitVectors
*
* Construct interpolation weights based on fitting smooth vectors
*
* inputs:
* ip = row number of row in P being processed (0-based)
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
* nc = number of coarse grid points
* ind = indices of coarse grid points (0-based)
*
* output:
* val = interpolation weights for the coarse grid points
* V = smooth vectors; first one has been changed to constant vector;
* vectors have also been normalized; this is also an input
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V,
HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int   i, j;
   HYPRE_Real *work;
   HYPRE_Int   work_size;
   HYPRE_Int   info;
   HYPRE_Int   temp;

   /*
   hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
   for (i=0; i<nc; i++)
      hypre_printf("%d ", ind[i]);
   hypre_printf("\n");
   */

   /* nothing to fit when the row has no coarse neighbors */
   if (nc == 0)
   {
      return 0;
   }

   /* generous fixed workspace for dgels; sized for the small systems
    * (num x nc) that arise here */
   work_size = 2000*64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);

   /* build the num x nc matrix A, column j holding the smooth-vector
    * values at coarse point ind[j] (column-major for LAPACK) */
   a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST);
   ap = a;
   for (j=0; j<nc; j++)
   {
      for (i=0; i<num; i++)
      {
         *ap = V[i*n+ind[j]];
         ap++;
      }
   }

   /* right-hand side: smooth-vector values at the fine point ip;
    * b must be max(nc,num) long because dgels overwrites it with the
    * solution */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i=0; i<num; i++)
   {
      b[i] = V[i*n+ip];
   }

   {
      char trans = 'N';
      HYPRE_Int one = 1;
      /* least-squares solve: min || a*x - b ||_2 */
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size, &info);
      if (info != 0)
      {
         /* note: hypre_error_w_msg takes a plain message string, so the
          * message must not contain unfilled conversion specifiers */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned nonzero info\n");
      }

      /* copy solution into output vector */
      for (j=0; j<nc; j++)
      {
         val[j] = b[j];
      }
   }

   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);

   /* 0 on success, otherwise the LAPACK info code */
   return info;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpLS
*
* Interpolation built from fitting smooth vectors
* - sequential version only
*--------------------------------------------------------------------------*/
/* Build interpolation P by least-squares fitting of smooth vectors
 * (sequential version only; the off-diagonal part of P is left empty —
 * see the "undone" marker below).  C-points interpolate by identity;
 * each F-point interpolates from its strong C-neighbors in S_diag with
 * weights computed by hypre_BoomerAMGFitVectors.
 *
 * A               : unused here except through S (kept for interface parity)
 * CF_marker       : >= 0 marks C-points, < 0 marks F-points
 * S               : strength matrix (pattern used, values not read)
 * num_cpts_global : global coarse-point partition
 * num_functions   : > 1 enables per-function (system) filtering of dof_func
 * trunc_factor    : if nonzero, P is truncated after construction
 * num_smooth      : number of smooth vectors in SmoothVecs
 * SmoothVecs      : the smooth vectors (num_smooth blocks of n_fine)
 * P_ptr           : output interpolation matrix
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int num_smooth,
HYPRE_Real *SmoothVecs,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
/* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
/* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
/* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
//HYPRE_Real *S_ext_data;
//HYPRE_Int *S_ext_i;
//HYPRE_BigInt *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker;
/* HYPRE_Int *P_marker_offd; */
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
/* HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd; */
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
//HYPRE_BigInt *big_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[my_id];
/* NOTE(review): indexes num_cpts_global at [num_procs] — assumes the
 * array has num_procs+1 entries (old-style partition); confirm against
 * the caller's partitioning mode */
total_global_cpts = num_cpts_global[num_procs];
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
/* pack CF_marker values for the points this rank sends */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* same exchange for dof_func in the systems (num_functions > 1) case */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* NOTE(review): S_ext is extracted but never read in this (sequential-
 * only) version; it is only destroyed at the end */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
//S_ext_i = hypre_CSRMatrixI(S_ext);
//S_ext_j = hypre_CSRMatrixBigJ(S_ext);
//S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* each thread j counts coarse points and P_diag entries in its own
 * contiguous chunk [ns, ne); per-thread tallies are prefix-summed below */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
/* removed */
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/* serial prefix sums over the per-thread counters */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
/* jj_counter_offd stays 0 (offd counting was removed above), so the
 * offd part of P is allocated empty */
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);*/
/* shift each thread's local coarse numbering by the coarse-point count
 * of all preceding thread chunks, making fine_to_coarse globally
 * consistent on this rank */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
/* second pass: fill P_diag; each thread resumes its jj_counter from the
 * prefix-summed jj_count of the preceding chunk */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
HYPRE_Int kk;
/* NOTE(review): fixed-size buffer with no bounds check — overflows
 * if a row has more than 1000 strong C-neighbors */
HYPRE_Int indices[1000]; /* kludge */
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
kk = 0;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i1];
jj_counter++;
indices[kk] = i1;
kk++;
}
}
/* least-squares weights for the kk collected C-neighbors */
hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
kk, indices, &P_diag_data[P_diag_i[i]]);
/* Off-Diagonal part of P */
/* undone */
}
}
}
/* NOTE(review): after the parallel loop above, i and jj_counter are the
 * outer (shared) variables, not the thread-private copies — under OpenMP
 * this writes P_diag_i[num_threads-1] = 0 rather than the intended
 * P_diag_i[n_fine] (already set during allocation); confirm and remove */
P_diag_i[i] = jj_counter; /* check that this is in right place for threads */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
/* truncation may reallocate the CSR arrays; re-fetch the pointers */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* build the compressed off-processor column map for P (no-op here since
 * P_offd is empty, kept for symmetry with the parallel builders) */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
/* unique the sorted column list */
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
tmp_map_offd[i] = P_marker[i];
/* renumber P_offd_j into the compressed local column indexing */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
//hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpGSMG
*
* Difference with hypre_BoomerAMGBuildInterp is that S contains values
* and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int *tmp_map_offd = NULL;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_BigInt *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
//HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_BigInt big_i2;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int start;
HYPRE_Int c_num;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
total_global_cpts = 0; /* we will set this later for the matrix in the setup */
/* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_S_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Loop over ith row of S. First, the diagonal part of S */
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += S_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
sum += S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
sum += S_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = S_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
P_diag_data[P_marker[i2]]
+= distribute * S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i2]]
+= distribute * S_offd_data[jj1];
}
}
}
else
{
/* do nothing */
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else
{
/* do nothing */
}
}
/*----------------------------------------------------------------
* Still looping over ith row of S. Next, loop over the
* off-diagonal part of S
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = S_offd_j[jj];
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n)
{
/* in the diagonal block */
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
sum += S_ext_data[jj1];
}
else
{
/* in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
sum += S_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = S_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */
{
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]]
+= distribute * S_ext_data[jj1];
}
else
{
/* check to see if it is in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[j]]
+= distribute * S_ext_data[jj1];
}
}
}
}
else
{
/* do nothing */
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else
{
/* do nothing */
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
sum = 0.;
for (jj = jj_begin_row; jj < jj_end_row; jj++)
sum += P_diag_data[jj];
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
sum += P_offd_data[jj];
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= sum;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= sum;
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
tmp_map_offd[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
|
adam_op.h | #pragma once
#include <cmath>

#include "caffe2/core/operator.h"
namespace caffe2 {
// Performs one Adam optimizer step over N parameters (reference CPU
// implementation).  For each element i:
//
//   nm[i] = beta1 * m[i] + (1 - beta1) * g[i]        (1st-moment estimate)
//   nv[i] = beta2 * v[i] + (1 - beta2) * g[i]^2      (2nd-moment estimate)
//   ng[i] = lr[0] * correction * nm[i] / (sqrt(nv[i]) + eps_hat)
//
// `correction` is the bias-correction factor computed by the caller
// (sqrt(1 - beta2^t) / (1 - beta1^t)); `lr` points at a 1-element tensor
// read on the host.  Outputs may alias the corresponding inputs
// (ng == g, nm == m, nv == v).  `context` is unused here; the parameter
// keeps the signature uniform across device-specific specializations.
template <typename Context>
void adam_update(
    int N,
    const float* g,
    const float* m,
    const float* v,
    float* ng,
    float* nm,
    float* nv,
    float beta1,
    float beta2,
    float eps_hat,
    float correction,
    const float* lr,
    Context* context) {
#pragma omp parallel for
  for (auto i = 0; i < N; ++i) {
    const float gi = g[i];
    const float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
    const float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
    // std::sqrt selects the float overload, avoiding the float->double->float
    // round-trip that the C library's sqrt(double) would perform.
    ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);
  }
}
// Adam optimizer operator.  Inputs: gradient, the two running moment
// estimates, a 1-element learning rate, and the iteration counter (which
// must live on the CPU).  Outputs: updated gradient and updated moments;
// outputs may share storage with the corresponding inputs.
template <typename T, class Context>
class AdamOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  AdamOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        beta1_(OperatorBase::GetSingleArgument<float>("beta1", 0.9)),
        beta2_(OperatorBase::GetSingleArgument<float>("beta2", 0.999)),
        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)) {}
  bool RunOnDevice() override {
    // Iter live on the CPU
    CAFFE_ENFORCE(OperatorBase::InputIsType<TensorCPU>(ITER));
    // lr is a scalar; gradient and both moments must have matching sizes
    CAFFE_ENFORCE(Input(LR).size() == 1);
    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_1).size());
    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_2).size());
    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
    Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
    Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
    const auto iter =
        OperatorBase::Input<TensorCPU>(ITER).template data<int>()[0];
    // Adam's time step t is 1-indexed; ITER starts at 0
    const auto t = iter + 1;
    // bias-correction factor: sqrt(1 - beta2^t) / (1 - beta1^t)
    const auto correction =
        std::sqrt(T(1.) - std::pow(beta2_, t)) / (T(1.) - std::pow(beta1_, t));
    adam_update<Context>(
        Input(GRAD).size(),
        Input(GRAD).template data<T>(),
        Input(MOMENT_1).template data<T>(),
        Input(MOMENT_2).template data<T>(),
        Output(OUTPUT_GRAD)->template mutable_data<T>(),
        Output(OUTPUT_MOMENT_1)->template mutable_data<T>(),
        Output(OUTPUT_MOMENT_2)->template mutable_data<T>(),
        beta1_,
        beta2_,
        epsilon_,
        correction,
        Input(LR).template data<T>(),
        &context_);
    return true;
  }
 protected:
  // NOTE(review): the in-class initializer for epsilon_ (1e-8) disagrees with
  // the constructor's argument default (1e-5).  The constructor always runs,
  // so 1e-5 is the effective default — confirm which value was intended.
  T beta1_{0.9};
  T beta2_{0.999};
  T epsilon_{1e-8};
  INPUT_TAGS(GRAD, MOMENT_1, MOMENT_2, LR, ITER);
  OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENT_1, OUTPUT_MOMENT_2);
};
}
|
GB_binop__max_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint64)
// A*D function (colscale): GB (_AxD__max_uint64)
// D*A function (rowscale): GB (_DxB__max_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint64)
// C=scalar+B GB (_bind1st__max_uint64)
// C=scalar+B' GB (_bind1st_tran__max_uint64)
// C=A+scalar GB (_bind2nd__max_uint64)
// C=A'+scalar GB (_bind2nd_tran__max_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT64 || GxB_NO_MAX_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The loop body comes from the
// shared template, specialized via the GB_* macros defined above
// (GB_BINOP expands to GB_IMAX on uint64_t).
void GB (_Cdense_ewise3_accum__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation into C);
// specialized via the GB_* macros above (GB_BINOP = GB_IMAX on uint64_t).
void GB (_Cdense_ewise3_noaccum__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using GB_IMAX
// on uint64_t.  Work is pre-sliced into B_ntasks tasks (B_ek_slicing) and
// run with B_nthreads threads by the included template.
GrB_Info GB (_Cdense_accumB__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C,
// using GB_IMAX on uint64_t.
GrB_Info GB (_Cdense_accumb__max_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first;
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D, where the "product" is
// GB_IMAX on uint64_t (see GB_BINOP above).
GrB_Info GB (_AxD__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output values of C, written by the included template
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D, where the "product" is
// GB_IMAX on uint64_t (see GB_BINOP above).
GrB_Info GB (_DxB__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output values of C, written by the included template
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where "+" is GB_IMAX on
// uint64_t.  When is_eWiseUnion is true, the alpha/beta scalars are read
// and passed to the template (presumably as substitutes for entries present
// in only one of A or B — see GB_add_template.c).
GrB_Info GB (_AaddB__max_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only read in the eWiseUnion case
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is sparse or
// hypersparse; ".*" is GB_IMAX on uint64_t.
GrB_Info GB (_AemultB_08__max_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hypersparse and B is bitmap/full.
// GB_BINOP_FLIP is 0 for this operator (see macro above), so only the
// non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hypersparse and both A and B are
// bitmap/full; ".*" is GB_IMAX on uint64_t.
GrB_Info GB (_AemultB_04__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is
// stored as a bitmap; ".*" is GB_IMAX on uint64_t.
GrB_Info GB (_AemultB_bitmap__max_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_IMAX (x, Bx [p]): apply the binary operator with the scalar x
// bound to the first argument.  Cx and Bx may alias.  Bb is the bitmap of B
// when B is stored as a bitmap (NULL otherwise); absent entries are skipped.
GrB_Info GB (_bind1st__max_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GB_IMAX (Ax [p], y): apply the binary operator with the scalar y
// bound to the second argument.  Cx and Ax may alias.  Ab is the bitmap of A
// when A is stored as a bitmap (NULL otherwise); absent entries are skipped.
GrB_Info GB (_bind2nd__max_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the binary operator with the scalar
// x bound to the first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__max_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (A and B share the same type here, so the value is
    // unchanged; the redefinition keeps later code consistent)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply the binary operator with the scalar
// y bound to the second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__max_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_uint64
// op(A') function: GB_unop_tran__identity_bool_uint64
// C type: bool
// A type: uint64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// A's scalar type
#define GB_ATYPE \
    uint64_t

// C's scalar type
#define GB_CTYPE \
    bool

// aij = Ax [pA]: read one entry of A
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// address a single entry of C
#define GB_CX(p) Cx [p]

// unary operator: identity
#define GB_OP(z, x) \
    z = x ;

// casting: C's standard uint64_t -> bool conversion (nonzero becomes true)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the op is identity, but the uint64_t -> bool typecast is still
// required, so a raw memcpy of Ax into Cx would be wrong)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecast each of the anz entries of Ax (uint64_t) into Cx (bool), in
// parallel.  Ab is A's bitmap (NULL unless A is stored in bitmap form).
GrB_Info GB_unop_apply__identity_bool_uint64
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,            // number of entries (or bitmap slots) to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity with no typecast: a parallel memcpy suffices
        // (dead here, since this file defines the flag as 0)
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // slot p holds no entry; skip it
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_bool_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // workspaces for the transpose template
    const int64_t *GB_RESTRICT A_slice, // how A's entries are split across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop is in the shared template, which consumes the
    // GB_CAST_OP macro defined above to cast each entry to bool
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
par_relax_more.c |
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
* these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int*,HYPRE_Real *,HYPRE_Real *,HYPRE_Int *);
/******************************************************************************
 *
 * Use the inf-norm of A (or of D^{-1}A, in magnitude, when scale != 0) to
 * estimate its largest eigenvalue - an upper bound for SPD matrices.
 *
 * A       - matrix to relax with
 * scale   - if nonzero, divide each row sum by |first entry of the row|
 *           (assumed to be the diagonal)
 * max_eig - output: the estimate; negated if the diagonal is entirely
 *           non-positive
 *
 * Returns hypre_error_flag.
 *
 *****************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                     HYPRE_Int scale, /* scale by diagonal?*/
                                     HYPRE_Real *max_eig)
{
   HYPRE_Real e_max;
   HYPRE_Real row_sum, max_norm;
   HYPRE_Real *col_val;
   HYPRE_Real temp;
   /* FIX: initialize diag_value.  It was previously read uninitialized (UB)
      when scale was requested and a row had no entries (row_length == 0);
      with 0.0 the "diag_value != 0.0" guard below simply skips the scaling. */
   HYPRE_Real diag_value = 0.0;
   HYPRE_Int pos_diag, neg_diag;
   HYPRE_Int start_row, end_row;
   HYPRE_Int row_length;
   HYPRE_Int *col_ind;
   HYPRE_Int j;
   HYPRE_Int i;

   /* estimate with the inf-norm of A - should be ok for SPD matrices */
   start_row = hypre_ParCSRMatrixFirstRowIndex(A);
   end_row   = hypre_ParCSRMatrixLastRowIndex(A);

   max_norm = 0.0;
   pos_diag = neg_diag = 0;

   for ( i = start_row; i <= end_row; i++ )
   {
      HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
      row_sum = 0.0;
      for (j = 0; j < row_length; j++)
      {
         /* NOTE(review): assumes the diagonal entry is stored first in each
            row - confirm this invariant for the matrices passed here */
         if (j == 0) diag_value = fabs(col_val[j]);

         row_sum += fabs(col_val[j]);

         /* track the signs of the actual diagonal entries */
         if ( col_ind[j] == i && col_val[j] > 0.0 ) pos_diag++;
         if ( col_ind[j] == i && col_val[j] < 0.0 ) neg_diag++;
      }
      if (scale)
      {
         /* inf-norm of D^{-1}A instead of A */
         if (diag_value != 0.0)
            row_sum = row_sum/diag_value;
      }
      if ( row_sum > max_norm ) max_norm = row_sum;
      HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
   }

   /* get max across procs */
   hypre_MPI_Allreduce(&max_norm, &temp, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A));
   max_norm = temp;

   /* from Charles: an all-non-positive diagonal means the spectrum is
      flipped, so negate the estimate */
   if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;

   /* eig estimates */
   e_max = max_norm;

   /* return */
   *max_eig = e_max;

   return hypre_error_flag;
}
/******************************************************************************
   Use CG (i.e. the Lanczos process) to estimate the extreme eigenvalues.
   scale means get eig est of (D^{-1/2} A D^{-1/2})

   Runs up to max_iter CG steps with a random right-hand side, builds the
   Lanczos tridiagonal matrix from the alpha/beta recurrence coefficients,
   and returns its smallest/largest eigenvalues (computed by tql1) as the
   estimates for A.
 ******************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                       HYPRE_Int scale, /* scale by diagonal?*/
                                       HYPRE_Int max_iter,
                                       HYPRE_Real *max_eig,   /* out: largest eig estimate */
                                       HYPRE_Real *min_eig)   /* out: smallest eig estimate */
{
   HYPRE_Int i, j, err;
   hypre_ParVector *p;    /* search direction */
   hypre_ParVector *s;    /* work vector: C*r, then A*p */
   hypre_ParVector *r;    /* residual (starts random) */
   hypre_ParVector *ds;   /* diagonal scaling D^{-1/2} (or all ones) */
   hypre_ParVector *u;    /* temp for the scaled matvec */
   HYPRE_Real *tridiag = NULL;   /* diagonal of the Lanczos tridiagonal matrix */
   HYPRE_Real *trioffd = NULL;   /* sub/super-diagonal of that matrix */
   HYPRE_Real lambda_max ;
   HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
   HYPRE_Real diag;
   HYPRE_Real lambda_min;
   HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
   HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);

   /* check the size of A - don't iterate more than the size */
   HYPRE_Int size = hypre_ParCSRMatrixGlobalNumRows(A);
   if (size < max_iter)
      max_iter = size;

   /* create some temp vectors: p, s, r , ds, u */
   r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(r);
   hypre_ParVectorSetPartitioningOwner(r,0);

   p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(p);
   hypre_ParVectorSetPartitioningOwner(p,0);

   s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(s);
   hypre_ParVectorSetPartitioningOwner(s,0);

   ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                              hypre_ParCSRMatrixGlobalNumRows(A),
                              hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(ds);
   hypre_ParVectorSetPartitioningOwner(ds,0);

   u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(u);
   hypre_ParVectorSetPartitioningOwner(u,0);

   /* point to local data */
   s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
   p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
   ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
   u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));

   /* make room for tri-diag matrix */
   tridiag = hypre_CTAlloc(HYPRE_Real, max_iter+1);
   trioffd = hypre_CTAlloc(HYPRE_Real, max_iter+1);
   for (i=0; i < max_iter + 1; i++)
   {
      tridiag[i] = 0;
      trioffd[i] = 0;
   }

   /* set residual to random */
   hypre_ParVectorSetRandomValues(r,1);

   if (scale)
   {
      /* ds = D^{-1/2}; assumes the first entry of each diagonal-block row is
         the (positive) diagonal */
      for (i = 0; i < local_size; i++)
      {
         diag = A_diag_data[A_diag_i[i]];
         ds_data[i] = 1/sqrt(diag);
      }
   }
   else
   {
      /* set ds to 1 */
      hypre_ParVectorSetConstantValues(ds,1.0);
   }

   /* gamma = <r,Cr>  (value unused: it is recomputed before first use
      inside the loop; gamma_old is only read when i > 0) */
   gamma = hypre_ParVectorInnerProd(r,p);

   /* for the initial filling of the tridiag matrix */
   beta = 1.0;

   i = 0;
   while (i < max_iter)
   {
      /* s = C*r */
      /* TO DO: C = diag scale (preconditioner is currently the identity) */
      hypre_ParVectorCopy(r, s);

      /* gamma = <r,Cr> */
      gamma_old = gamma;
      gamma = hypre_ParVectorInnerProd(r,s);

      if (i==0)
      {
         beta = 1.0;
         /* p_0 = C*r */
         hypre_ParVectorCopy(s, p);
      }
      else
      {
         /* beta = gamma / gamma_old */
         beta = gamma / gamma_old;

         /* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j=0; j < local_size; j++)
         {
            p_data[j] = s_data[j] + beta*p_data[j];
         }
      }

      if (scale)
      {
         /* s = D^{-1/2}A*D^{-1/2}*p */
         for (j = 0; j < local_size; j++)
         {
            u_data[j] = ds_data[j] * p_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
         for (j = 0; j < local_size; j++)
         {
            s_data[j] = ds_data[j] * s_data[j];
         }
      }
      else
      {
         /* s = A*p */
         hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
      }

      /* <s,p> */
      sdotp = hypre_ParVectorInnerProd(s,p);

      /* alpha = gamma / <s,p> */
      alpha = gamma/sdotp;

      /* fill the Lanczos tridiagonal matrix from the CG coefficients:
         T(i,i) += 1/alpha_i (+ beta_i/alpha_{i-1}), T(i,i+1) = sqrt(beta)/alpha */
      alphainv = 1.0/alpha;

      tridiag[i+1] = alphainv;
      tridiag[i] *= beta;
      tridiag[i] += alphainv;

      trioffd[i+1] = alphainv;
      trioffd[i] *= sqrt(beta);

      /* x = x + alpha*p */
      /* don't need the iterate itself - only the recurrence coefficients */

      /* r = r - alpha*s */
      hypre_ParVectorAxpy( -alpha, s, r);

      i++;
   }

   /* eispack routine - eigenvalues return in tridiag and ordered
      (err is not checked; on failure only the first err-1 values are ordered) */
   hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);

   lambda_max = tridiag[i-1];
   lambda_min = tridiag[0];
   /* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
   /* hypre_printf("linpack min eig est = %g\n", lambda_min);*/

   hypre_TFree(tridiag);
   hypre_TFree(trioffd);

   hypre_ParVectorDestroy(r);
   hypre_ParVectorDestroy(s);
   hypre_ParVectorDestroy(p);
   hypre_ParVectorDestroy(ds);
   hypre_ParVectorDestroy(u);

   /* return */
   *max_eig = lambda_max;
   *min_eig = lambda_min;

   return hypre_error_flag;
}
/******************************************************************************
   Chebyshev relaxation

   Can specify order 1-4 (this is the order of the resid polynomial)- here we
   explicitly code the coefficients (instead of iteratively determining)

   variant 0: standard chebyshev
      this is rlx 11 if scale = 0, and 16 if scale == 1
   variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
      this is rlx 15 if scale = 0, and 17 if scale == 1

   ratio indicates the percentage of the whole spectrum to use (so .5
   means half, and .1 means 10percent)

   Performs u <- u + s(A) r  (or the diagonally scaled equivalent), where
   s is the degree-(order-1) polynomial whose coefficients are tabulated
   below and r = f - A u is the current residual.
*******************************************************************************/
HYPRE_Int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                  hypre_ParVector *f, /* right-hand side */
                                  HYPRE_Real max_eig,   /* estimate of largest eigenvalue */
                                  HYPRE_Real min_eig,   /* estimate of smallest eigenvalue */
                                  HYPRE_Real fraction,  /* fraction of spectrum to target */
                                  HYPRE_Int order, /* polynomial order */
                                  HYPRE_Int scale, /* scale by diagonal?*/
                                  HYPRE_Int variant,
                                  hypre_ParVector *u, /* initial/updated approximation */
                                  hypre_ParVector *v /* temporary vector */,
                                  hypre_ParVector *r /*another temp vector */ )
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));

   HYPRE_Real theta, delta;   /* center and half-width of the target interval */

   HYPRE_Real den;
   HYPRE_Real upper_bound, lower_bound;

   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Real coefs[5];       /* polynomial coefficients; indices 0..cheby_order (<= 3) used */
   HYPRE_Real mult;
   HYPRE_Real *orig_u;

   HYPRE_Real tmp_d;

   HYPRE_Int cheby_order;

   HYPRE_Real *ds_data, *tmp_data;
   HYPRE_Real diag;

   hypre_ParVector *ds;
   hypre_ParVector *tmp_vec;

   /* u = u + p(A)r */

   /* clamp the polynomial order to the tabulated range 1..4 */
   if (order > 4)
      order = 4;
   if (order < 1)
      order = 1;

   /* we are using the order of p(A) */
   cheby_order = order -1;

   /* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;

   /* lower_bound = max_eig/fraction; */
   lower_bound = (upper_bound - min_eig)* fraction + min_eig;

   /* theta and delta */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;

   if (variant == 1 )
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less that resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;

         case 1:  /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta*theta + delta*theta);
            coefs[0] = (delta + 2*theta)/den;
            coefs[1] = -1.0/den;
            break;

         case 2:  /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
            coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
            coefs[1] = -(2*delta + 6*theta)/den;
            coefs[2] = 2/den;
            break;

         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
            coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
            coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
            coefs[2] = -( 4*delta + 16*theta)/den;
            coefs[3] = 4/den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less thatn resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;

         case 1:  /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta*delta - 2*theta*theta;
            coefs[0] = -4*theta/den;
            coefs[1] = 2/den;
            break;

         case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
            coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
            coefs[1] = 12*theta/den;
            coefs[2] = -4/den;
            break;

         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
            coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
            coefs[1] = (8*delta*delta - 48*theta*theta)/den;
            coefs[2] = 32*theta/den;
            coefs[3] = -8/den;
            break;
      }
   }

   orig_u = hypre_CTAlloc(HYPRE_Real, num_rows);

   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

      /* save u, then seed the Horner recurrence: u = c_k * r */
      for ( i = 0; i < num_rows; i++ )
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }

      /* Horner's rule on the polynomial in A: u = c_i*r + A*u */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }

      /* u <- u_orig + s(A) r */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for ( i = 0; i < num_rows; i++ )
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(ds);
      hypre_ParVectorSetPartitioningOwner(ds,0);
      ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));

      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(tmp_vec);
      hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));

      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         /* assumes the first entry of each diagonal-block row is the
            (positive) diagonal */
         diag = A_diag_data[A_diag_i[j]];
         ds_data[j] = 1/sqrt(diag);
         r_data[j] = ds_data[j] * f_data[j];
      }
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         /* tmp = -A*u, so this adds -D^(-1/2)A u */
         r_data[j] += ds_data[j] * tmp_data[j];
      }

      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }

      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);

         /* u_new = coef*r + v*/
         mult = coefs[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_d = ds_data[j]* v_data[j];
            u_data[j] = mult * r_data[j] + tmp_d;
         }

      } /* end of cheby_order loop */

      /* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
      }

      hypre_ParVectorDestroy(ds);
      hypre_ParVectorDestroy(tmp_vec);

   }/* end of scaling code */

   hypre_TFree(orig_u);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax_FCFJacobi
 *
 * One F-C-F weighted-Jacobi sweep: relax the F-points, then the C-points,
 * then the F-points again.  On the coarsest level (cf_marker == NULL) a
 * single plain Jacobi sweep over all points is performed instead.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
                                          hypre_ParVector    *f,
                                          HYPRE_Int          *cf_marker,
                                          HYPRE_Real          relax_weight,
                                          hypre_ParVector    *u,
                                          hypre_ParVector    *Vtemp)
{
   HYPRE_Int relax_type = 0;        /* 0 = (weighted) Jacobi */
   hypre_ParVector *Ztemp = NULL;   /* not needed by Jacobi */
   HYPRE_Int points[3];             /* relax-point schedule */
   HYPRE_Int num_sweeps;
   HYPRE_Int sweep;

   if (cf_marker == NULL)
   {
      /* coarsest level: no CF splitting, so just one sweep over all points */
      num_sweeps = 1;
      points[0]  = 0;
   }
   else
   {
      /* F (-1), then C (1), then F (-1) */
      num_sweeps = 3;
      points[0]  = -1;
      points[1]  =  1;
      points[2]  = -1;
   }

   for (sweep = 0; sweep < num_sweeps; sweep++)
   {
      hypre_BoomerAMGRelax(A,
                           f,
                           cf_marker,
                           relax_type,
                           points[sweep],
                           relax_weight,
                           0.0,
                           NULL,
                           u,
                           Vtemp, Ztemp);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * CG Smoother -
 *
 * Run num_its iterations of an already-created PCG solver on A u = f,
 * starting from (and updating) u.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
                                hypre_ParCSRMatrix *A,
                                hypre_ParVector *f,
                                hypre_ParVector *u,
                                HYPRE_Int num_its)
{
   /* cap the iteration count, then solve */
   HYPRE_PCGSetMaxIter(solver, num_its);
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);

#if 0
   /* optional convergence diagnostics, printed on rank 0 only */
   {
      HYPRE_Int  myid;
      HYPRE_Int  num_iterations;
      HYPRE_Real final_res_norm;

      hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
      HYPRE_PCGGetNumIterations(solver, &num_iterations);
      HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
      if (myid == 0)
      {
         hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
         hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
      }
   }
#endif

   return hypre_error_flag;
}
/* tql1.f --
   this is the eispack translation - from Barry Smith in Petsc
   Note that this routine always uses real numbers (not complex) even
   if the underlying matrix is Hermitian. This is because the Lanczos
   process applied to Hermitian matrices always produces a real,
   symmetric tridiagonal matrix.

   The f2c-style comma expressions and goto structure of the original
   Fortran are preserved verbatim; see the embedded EISPACK commentary
   below for the input/output contract.
*/
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real*,HYPRE_Real*);

HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n,HYPRE_Real *d,HYPRE_Real *e,HYPRE_Int *ierr)
{
   /* System generated locals */
   HYPRE_Int i__1,i__2;
   HYPRE_Real d__1,d__2,c_b10 = 1.0;

   /* Local variables */
   HYPRE_Real c,f,g,h;
   HYPRE_Int i,j,l,m;
   HYPRE_Real p,r,s,c2,c3 = 0.0;
   HYPRE_Int l1,l2;
   HYPRE_Real s2 = 0.0;
   HYPRE_Int ii;
   HYPRE_Real dl1,el1;
   HYPRE_Int mml;
   HYPRE_Real tst1,tst2;

/*     THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/*     NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/*     WILKINSON. */
/*     HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */

/*     THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/*     TRIDIAGONAL MATRIX BY THE QL METHOD. */

/*     ON INPUT */

/*        N IS THE ORDER OF THE MATRIX. */

/*        D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */

/*        E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/*          IN ITS LAST N-1 POSITIONS.  E(1) IS ARBITRARY. */

/*     ON OUTPUT */

/*        D CONTAINS THE EIGENVALUES IN ASCENDING ORDER.  IF AN */
/*          ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/*          ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/*          THE SMALLEST EIGENVALUES. */

/*        E HAS BEEN DESTROYED. */

/*        IERR IS SET TO */
/*          ZERO       FOR NORMAL RETURN, */
/*          J          IF THE J-TH EIGENVALUE HAS NOT BEEN */
/*                     DETERMINED AFTER 30 ITERATIONS. */

/*     CALLS CGPTHY FOR  DSQRT(A*A + B*B) . */

/*     QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/*     MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/

/*     THIS VERSION DATED AUGUST 1983. */

/*     ------------------------------------------------------------------
*/
   HYPRE_Real ds;

   /* shift both arrays so 1-based Fortran indexing works unchanged */
   --e;
   --d;

   *ierr = 0;
   if (*n == 1) {
      goto L1001;
   }

   /* move the subdiagonal from e[2..n] down to e[1..n-1] */
   i__1 = *n;
   for (i = 2; i <= i__1; ++i) {
      e[i - 1] = e[i];
   }

   f = 0.;
   tst1 = 0.;
   e[*n] = 0.;

   i__1 = *n;
   for (l = 1; l <= i__1; ++l) {
      j = 0;
      h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
      if (tst1 < h) {
         tst1 = h;
      }
/*     .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
      i__2 = *n;
      for (m = l; m <= i__2; ++m) {
         tst2 = tst1 + (d__1 = e[m],fabs(d__1));
         if (tst2 == tst1) {
            goto L120;
         }
/*     .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/*                THROUGH THE BOTTOM OF THE LOOP .......... */
      }
L120:
      if (m == l) {
         goto L210;
      }
L130:
      if (j == 30) {
         /* no convergence for this eigenvalue within 30 QL iterations */
         goto L1000;
      }
      ++j;
/*     .......... FORM SHIFT .......... */
      l1 = l + 1;
      l2 = l1 + 1;
      g = d[l];
      p = (d[l1] - g) / (e[l] * 2.);
      r = hypre_LINPACKcgpthy(&p,&c_b10);
      ds = 1.0; if (p < 0.0) ds = -1.0;
      d[l] = e[l] / (p + ds*r);
      d[l1] = e[l] * (p + ds*r);
      dl1 = d[l1];
      h = g - d[l];
      if (l2 > *n) {
         goto L145;
      }

      i__2 = *n;
      for (i = l2; i <= i__2; ++i) {
         d[i] -= h;
      }

L145:
      f += h;
/*     .......... QL TRANSFORMATION .......... */
      p = d[m];
      c = 1.;
      c2 = c;
      el1 = e[l1];
      s = 0.;
      mml = m - l;
/*     .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
      i__2 = mml;
      for (ii = 1; ii <= i__2; ++ii) {
         c3 = c2;
         c2 = c;
         s2 = s;
         i = m - ii;
         g = c * e[i];
         h = c * p;
         r = hypre_LINPACKcgpthy(&p,&e[i]);
         e[i + 1] = s * r;
         s = e[i] / r;
         c = p / r;
         p = c * d[i] - s * g;
         d[i + 1] = h + s * (c * g + s * d[i]);
      }

      p = -s * s2 * c3 * el1 * e[l] / dl1;
      e[l] = s * p;
      d[l] = c * p;
      tst2 = tst1 + (d__1 = e[l],fabs(d__1));
      if (tst2 > tst1) {
         goto L130;
      }
L210:
      p = d[l] + f;
/*     .......... ORDER EIGENVALUES .......... */
      if (l == 1) {
         goto L250;
      }
/*     .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
      i__2 = l;
      for (ii = 2; ii <= i__2; ++ii) {
         i = l + 2 - ii;
         if (p >= d[i - 1]) {
            goto L270;
         }
         d[i] = d[i - 1];
      }

L250:
      i = 1;
L270:
      d[i] = p;
   }

   goto L1001;
/*     .......... SET ERROR -- NO CONVERGENCE TO AN */
/*                EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
   *ierr = l;
L1001:
   return 0;

} /* cgtql1_ */
/* FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW
   (the Moler-Morrison iteration used by EISPACK's pythag). */
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a,HYPRE_Real *b)
{
   HYPRE_Real p,r,s,t,u;
   HYPRE_Real abs_a,abs_b,q;

   abs_a = fabs(*a);
   abs_b = fabs(*b);

   /* p starts at max(|a|,|b|); if both are zero the answer is zero */
   p = hypre_max(abs_a,abs_b);
   if (!p) {
      return p;
   }

   /* r = (min/max)^2 lies in [0,1], so nothing below can overflow */
   q = hypre_min(abs_a,abs_b) / p;
   r = q * q;

   /* iterate until r underflows to the point where t == 4 exactly */
   for ( ; ; ) {
      t = r + 4.;
      if (t == 4.) {
         break;
      }
      s = r / t;
      u = s * 2. + 1.;
      p = u * p;
      q = s / u;
      r = q * q * r;
   }

   return p;
} /* cgpthy_ */
/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax_L1_Jacobi (same as the one in AMS, but this allows CF)

   u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 (the caller-supplied
   l1_norms).  relax_points selects which points to update: 0 for all,
   otherwise only rows whose cf_marker entry matches relax_points.
 *--------------------------------------------------------------------------*/
HYPRE_Int  hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
                                        hypre_ParVector    *f,
                                        HYPRE_Int          *cf_marker,
                                        HYPRE_Int           relax_points,
                                        HYPRE_Real          relax_weight,
                                        HYPRE_Real         *l1_norms,
                                        hypre_ParVector    *u,
                                        hypre_ParVector    *Vtemp )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);

   /* off-processor values of u, gathered via the comm package */
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;

   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id ;

   HYPRE_Real zero = 0.0;
   HYPRE_Real res;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   if (num_procs > 1)
   {
      /* post the exchange of boundary u values with neighboring processes;
         the buffer size is the total number of elements sent */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

      Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    * (Overlaps with the communication above; Vtemp holds the "old" u
    * so the sweep below is a true Jacobi update.)
    *-----------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
   }

   if (num_procs > 1)
   {
      /* wait for the boundary values to arrive in Vext_data */
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/

   if (relax_points == 0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {

         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/

         if (A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            /* off-diagonal block: empty when running on one process,
               so Vext_data being NULL there is never dereferenced */
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }

   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_points.
    *-----------------------------------------------------------------*/

   else
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {

         /*-----------------------------------------------------------
          * If i is of the right type ( C or F ) and diagonal is
          * nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/

         if (cf_marker[i] == relax_points
             && A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight * res)/l1_norms[i];
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data);
      hypre_TFree(v_buf_data);
   }

   return 0;
}
|
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017-2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "perftest.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <unistd.h>
#include <netdb.h>
#include <sys/poll.h>
/* Table of available benchmarks: command-line test name, measurement API
 * (UCT or UCP), operation command, traffic pattern (ping-pong or
 * unidirectional stream), human-readable description, the label used when
 * reporting overhead/latency, and the default window of outstanding
 * operations.  The table is terminated by a NULL name. */
test_type_t tests[] = {
    {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "active message latency", "latency", 1},

    {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency", "latency", 1},

    {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
     "atomic add latency", "latency", 1},

    {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate", "latency", 1},

    {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / rate", "latency", 1},

    {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / rate", "latency", 1},

    {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / rate", "latency", 1},

    {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "active message bandwidth / message rate", "overhead", 1},

    {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth / message rate", "overhead", 1},

    {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add message rate", "overhead", 1},

    {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag match latency", "latency", 1},

    {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag match bandwidth", "overhead", 32},

    {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag sync match latency", "latency", 1},

    {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag sync match bandwidth", "overhead", 32},

    {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency", "latency", 1},

    {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth", "overhead", 32},

    {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate", "latency", 1},

    {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add bandwidth / message rate", "overhead", 1},

    {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / bandwidth / rate", "latency", 1},

    {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / bandwidth / rate", "latency", 1},

    {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / bandwidth / rate", "latency", 1},

    {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "stream bandwidth", "overhead", 1},

    {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
     "stream latency", "latency", 1},

    {"ucp_am_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "am latency", "latency", 1},

    {"ucp_am_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "am bandwidth / message rate", "overhead", 32},

    {NULL}
};
/* Transfer exactly 'size' bytes over 'sock' with the given I/O call (send or
 * recv), polling with a 1ms timeout so the user's 'progress' callback keeps
 * being invoked while waiting.  'name' is used for error messages.
 * Returns 0 on success, -1 on error. */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            ret = sock_call(sock, (char*)data + total, size - total, 0);
            if (ret < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            } else if (ret == 0) {
                /* FIX: a return of 0 from recv() means the peer closed the
                 * connection; previously this looped forever making no
                 * progress.  (send() never returns 0 for a nonzero count.) */
                ucs_error("%s() failed: connection closed by peer", name);
                return -1;
            }
            total += ret;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }
        /* ret == 0 is a poll timeout: fall through and progress */

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}
/* Send exactly 'size' bytes on 'sock', driving 'progress' while waiting. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    /* send() takes a const buffer; cast it to the common I/O signature */
    typedef ssize_t (*io_func_t)(int, void *, size_t, int);

    ucs_assert(sock >= 0);
    return sock_io(sock, (io_func_t)send, POLLOUT, data, size, progress, arg,
                   "send");
}
static int safe_recv(int sock, void *data, size_t size,
void (*progress)(void *arg), void *arg)
{
ucs_assert(sock >= 0);
return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}
/*
 * Fill 'params' with the default test configuration (single thread, one
 * 8-byte contiguous message, 1M iterations) and allocate the message-size
 * list. The API/command/test-type fields are set to their *_LAST sentinels,
 * i.e. "not chosen yet" — the command line or batch file selects them later.
 *
 * Returns UCS_OK, or UCS_ERR_NO_MEMORY if the size list cannot be allocated.
 * The caller owns msg_size_list and must free it (see main()).
 */
ucs_status_t init_test_params(perftest_params_t *params)
{
    memset(params, 0, sizeof(*params));
    params->super.api = UCX_PERF_API_LAST;
    params->super.command = UCX_PERF_CMD_LAST;
    params->super.test_type = UCX_PERF_TEST_TYPE_LAST;
    params->super.thread_mode = UCS_THREAD_MODE_SINGLE;
    params->super.thread_count = 1;
    params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE;
    params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST;
    params->super.max_outstanding = 0;
    params->super.warmup_iter = 10000;
    params->super.warmup_time = 100e-3;   /* 100 ms warmup */
    params->super.alignment = ucs_get_page_size();
    params->super.max_iter = 1000000l;
    params->super.max_time = 0.0;         /* 0 = no wall-time limit */
    params->super.report_interval = 1.0;
    params->super.percentile_rank = 50.0; /* report the median */
    params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE;
    params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW;
    params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
    params->super.uct.am_hdr_size = 8;
    params->super.send_mem_type = UCS_MEMORY_TYPE_HOST;
    params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST;
    params->super.msg_size_cnt = 1;
    params->super.iov_stride = 0;
    params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->super.ucp.am_hdr_size = 0;
    strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE);
    strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE);
    params->super.msg_size_list = calloc(params->super.msg_size_cnt,
                                         sizeof(*params->super.msg_size_list));
    if (params->super.msg_size_list == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    params->super.msg_size_list[0] = 8; /* default message size */
    params->test_id = TEST_ID_UNDEFINED;
    return UCS_OK;
}
static unsigned sock_rte_group_size(void *rte_group)
{
sock_rte_group_t *group = rte_group;
return group->size;
}
static unsigned sock_rte_group_index(void *rte_group)
{
sock_rte_group_t *group = rte_group;
return group->is_server ? 0 : 1;
}
/*
 * Barrier between the two socket peers. Only the OpenMP master thread does
 * the socket exchange; the surrounding 'omp barrier' pragmas make the other
 * local threads wait for it. Each peer sends a magic word and then waits to
 * receive the peer's magic word back.
 */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp barrier
#pragma omp master
    {
        sock_rte_group_t *group = rte_group;

        /* loopback runs (size == 1) need no exchange */
        if (group->size > 1) {
            const unsigned magic = 0xdeadbeef;
            unsigned snc;

            snc = magic;
            safe_send(group->sendfd, &snc, sizeof(unsigned), progress, arg);
            snc = 0;
            /* only validate the magic when the receive actually succeeded */
            if (safe_recv(group->recvfd, &snc, sizeof(unsigned), progress, arg) == 0) {
                ucs_assert(snc == magic);
            }
        }
    }
#pragma omp barrier
}
/*
 * Send an iovec to the peer: first a size_t header with the total payload
 * length, then each iov entry's bytes in order.
 */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t total_len = 0;
    int idx;

    for (idx = 0; idx < iovcnt; ++idx) {
        total_len += iovec[idx].iov_len;
    }

    safe_send(group->sendfd, &total_len, sizeof(total_len), NULL, NULL);
    for (idx = 0; idx < iovcnt; ++idx) {
        safe_send(group->sendfd, iovec[idx].iov_base, iovec[idx].iov_len,
                  NULL, NULL);
    }
}
/*
 * Receive one logical message from peer 'src': a size_t length header
 * followed by the payload. Messages from any other rank are ignored.
 */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    size_t incoming = 0;

    if (src != group->peer) {
        return;
    }

    safe_recv(group->recvfd, &incoming, sizeof(incoming), NULL, NULL);
    ucs_assert_always(incoming <= max);
    safe_recv(group->recvfd, buffer, incoming, NULL, NULL);
}
/*
 * Report callback for the socket RTE: forward the measurement to the common
 * progress printer. 'arg' is the perftest context registered as report_arg.
 */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, const char *extra_info, int is_final,
                            int is_multi_thread)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info,
                   ctx->flags, is_final, ctx->server_addr == NULL,
                   is_multi_thread);
}

/* Run-time-environment vtable backed by plain TCP sockets (or a socketpair
 * in loopback mode); exchange_vec is a no-op for this transport. */
static ucx_perf_rte_t sock_rte = {
    .group_size = sock_rte_group_size,
    .group_index = sock_rte_group_index,
    .barrier = sock_rte_barrier,
    .post_vec = sock_rte_post_vec,
    .recv = sock_rte_recv,
    .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function,
    .report = sock_rte_report,
};
/*
 * Set up the RTE for single-process loopback runs: a socketpair connects the
 * process to itself, and the group has size 1.
 * NOTE(review): the function name misspells "loopback"; renaming would need a
 * coordinated change at the call site in setup_sock_rte().
 */
static ucs_status_t setup_sock_rte_loobkack(struct perftest_context *ctx)
{
    int connfds[2];
    int ret;

    /* a loopback run both drives the test and prints the results */
    ctx->flags |= TEST_FLAG_PRINT_TEST | TEST_FLAG_PRINT_RESULTS;

    ret = socketpair(AF_UNIX, SOCK_STREAM, 0, connfds);
    if (ret < 0) {
        ucs_error("socketpair() failed: %m");
        return UCS_ERR_IO_ERROR;
    }

    ctx->sock_rte_group.peer      = 0;
    ctx->sock_rte_group.size      = 1;
    ctx->sock_rte_group.is_server = 1;
    /* one end for sending, the other for receiving from ourselves */
    ctx->sock_rte_group.sendfd    = connfds[0];
    ctx->sock_rte_group.recvfd    = connfds[1];
    return UCS_OK;
}
/*
 * Establish the out-of-band TCP connection between the two test processes.
 * With a server address the process acts as client: it connects and pushes
 * its test parameters. Without one it acts as server: it binds, accepts one
 * client, and receives the effective parameters (including the message-size
 * list) from it. On success the connected fd is stored in ctx->sock_rte_group.
 *
 * BUG FIX: getaddrinfo() signals failure with a non-zero (positive EAI_*)
 * return code, not a negative one — the previous 'ret < 0' check missed
 * every resolution error. Magic buffer sizes were replaced by sizeof().
 */
static ucs_status_t setup_sock_rte_p2p(struct perftest_context *ctx)
{
    int optval = 1;
    int sockfd = -1;
    char addr_str[UCS_SOCKADDR_STRING_LEN];
    struct sockaddr_storage client_addr;
    socklen_t client_addr_len;
    int connfd;
    struct addrinfo hints, *res, *t;
    ucs_status_t status;
    int ret;
    char service[8];
    char err_str[64];

    err_str[0] = '\0'; /* defensive: never report garbage text */

    ucs_snprintf_safe(service, sizeof(service), "%u", ctx->port);
    memset(&hints, 0, sizeof(hints));
    hints.ai_flags    = (ctx->server_addr == NULL) ? AI_PASSIVE : 0;
    hints.ai_family   = ctx->af;
    hints.ai_socktype = SOCK_STREAM;

    ret = getaddrinfo(ctx->server_addr, service, &hints, &res);
    if (ret != 0) {
        ucs_error("getaddrinfo(server:%s, port:%s) error: [%s]",
                  ctx->server_addr, service, gai_strerror(ret));
        status = UCS_ERR_IO_ERROR;
        goto out;
    }

    if (res == NULL) {
        snprintf(err_str, sizeof(err_str), "getaddrinfo() returned empty list");
    }

    /* try each resolved address until one connects (client) or accepts (server) */
    for (t = res; t != NULL; t = t->ai_next) {
        sockfd = socket(t->ai_family, t->ai_socktype, t->ai_protocol);
        if (sockfd < 0) {
            snprintf(err_str, sizeof(err_str), "socket() failed: %m");
            continue;
        }

        if (ctx->server_addr != NULL) {
            /* client side: connect to the server */
            if (connect(sockfd, t->ai_addr, t->ai_addrlen) == 0) {
                break;
            }
            snprintf(err_str, sizeof(err_str), "connect() failed: %m");
        } else {
            /* server side: bind, listen, and accept a single client */
            status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
                                       &optval, sizeof(optval));
            if (status != UCS_OK) {
                status = UCS_ERR_IO_ERROR;
                goto err_close_sockfd;
            }

            if (bind(sockfd, t->ai_addr, t->ai_addrlen) == 0) {
                ret = listen(sockfd, 10);
                if (ret < 0) {
                    ucs_error("listen() failed: %m");
                    status = UCS_ERR_IO_ERROR;
                    goto err_close_sockfd;
                }

                printf("Waiting for connection...\n");

                /* Accept next connection */
                client_addr_len = sizeof(client_addr);
                connfd = accept(sockfd, (struct sockaddr*)&client_addr,
                                &client_addr_len);
                if (connfd < 0) {
                    ucs_error("accept() failed: %m");
                    status = UCS_ERR_IO_ERROR;
                    goto err_close_sockfd;
                }

                ucs_sockaddr_str((struct sockaddr*)&client_addr, addr_str,
                                 sizeof(addr_str));
                printf("Accepted connection from %s\n", addr_str);
                /* the listening socket is no longer needed */
                close(sockfd);
                break;
            }
            snprintf(err_str, sizeof(err_str), "bind() failed: %m");
        }

        close(sockfd);
        sockfd = -1;
    }

    if (sockfd < 0) {
        ucs_error("%s failed. %s",
                  (ctx->server_addr != NULL) ? "client" : "server", err_str);
        status = UCS_ERR_IO_ERROR;
        goto out_free_res;
    }

    if (ctx->server_addr == NULL) {
        /* release the memory for the list of the message sizes allocated
         * during the initialization of the default testing parameters */
        free(ctx->params.super.msg_size_list);
        ctx->params.super.msg_size_list = NULL;

        /* the server receives the effective test parameters from the client */
        ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ret) {
            status = UCS_ERR_IO_ERROR;
            goto err_close_connfd;
        }

        if (ctx->params.super.msg_size_cnt != 0) {
            ctx->params.super.msg_size_list =
                    calloc(ctx->params.super.msg_size_cnt,
                           sizeof(*ctx->params.super.msg_size_list));
            if (NULL == ctx->params.super.msg_size_list) {
                status = UCS_ERR_NO_MEMORY;
                goto err_close_connfd;
            }

            ret = safe_recv(connfd, ctx->params.super.msg_size_list,
                            sizeof(*ctx->params.super.msg_size_list) *
                            ctx->params.super.msg_size_cnt,
                            NULL, NULL);
            if (ret) {
                status = UCS_ERR_IO_ERROR;
                goto err_close_connfd;
            }
        }

        ctx->sock_rte_group.sendfd    = connfd;
        ctx->sock_rte_group.recvfd    = connfd;
        ctx->sock_rte_group.peer      = 1;
        ctx->sock_rte_group.is_server = 1;
    } else {
        /* the client pushes its parameters to the server */
        safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.super.msg_size_cnt != 0) {
            safe_send(sockfd, ctx->params.super.msg_size_list,
                      sizeof(*ctx->params.super.msg_size_list) *
                      ctx->params.super.msg_size_cnt,
                      NULL, NULL);
        }

        ctx->sock_rte_group.sendfd    = sockfd;
        ctx->sock_rte_group.recvfd    = sockfd;
        ctx->sock_rte_group.peer      = 0;
        ctx->sock_rte_group.is_server = 0;
    }

    ctx->sock_rte_group.size = 2;

    /* the server prints the test header, the client prints the results */
    if (ctx->sock_rte_group.is_server) {
        ctx->flags |= TEST_FLAG_PRINT_TEST;
    } else {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    status = UCS_OK;
    goto out_free_res;

err_close_connfd:
    ucs_close_fd(&connfd);
    goto out_free_res;
err_close_sockfd:
    ucs_close_fd(&sockfd);
out_free_res:
    freeaddrinfo(res);
out:
    return status;
}
/*
 * Create the socket-based RTE: a socketpair for loopback runs, otherwise a
 * TCP connection between the two peers; then register the vtable.
 */
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
    ucs_status_t status;

    status = (ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) ?
             setup_sock_rte_loobkack(ctx) : setup_sock_rte_p2p(ctx);
    if (status == UCS_OK) {
        ctx->params.super.rte_group  = &ctx->sock_rte_group;
        ctx->params.super.rte        = &sock_rte;
        ctx->params.super.report_arg = ctx;
    }
    return status;
}

/* Close the out-of-band socket(s); in p2p mode both directions share one fd,
 * so avoid a double close. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    sock_rte_group_t *grp = &ctx->sock_rte_group;
    int send_fd = grp->sendfd;

    close(send_fd);
    if (grp->recvfd != send_fd) {
        close(grp->recvfd);
    }
    return UCS_OK;
}
#if defined (HAVE_MPI)
/* RTE callback: number of ranks in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int nranks = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    return (unsigned)nranks;
}

/* RTE callback: this process's rank in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_index(void *rte_group)
{
    int my_rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    return (unsigned)my_rank;
}
/*
 * Barrier across all MPI ranks that keeps invoking the user 'progress'
 * callback while waiting. Only the OpenMP master thread makes MPI calls
 * (consistent with MPI_THREAD_FUNNELED); the surrounding 'omp barrier'
 * pragmas synchronize the local threads around it.
 */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
    int group_size, my_rank, i;
    MPI_Request *reqs;
    int nreqs = 0;
    int dummy;
    int flag;

#pragma omp barrier
#pragma omp master
    {
        /*
         * Naive non-blocking barrier implementation over send/recv, to call user
         * progress while waiting for completion.
         * Not using MPI_Ibarrier to be compatible with MPI-1.
         */
        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &group_size);

        /* allocate maximal possible number of requests */
        reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

        if (my_rank == 0) {
            /* root gathers "ping" from all other ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Irecv(&dummy, 0, MPI_INT,
                          i /* source */,
                          1 /* tag */,
                          MPI_COMM_WORLD,
                          &reqs[nreqs++]);
            }
        } else {
            /* every non-root rank sends "ping" and waits for "pong" */
            MPI_Send(&dummy, 0, MPI_INT,
                     0 /* dest */,
                     1 /* tag */,
                     MPI_COMM_WORLD);
            MPI_Irecv(&dummy, 0, MPI_INT,
                      0 /* source */,
                      2 /* tag */,
                      MPI_COMM_WORLD,
                      &reqs[nreqs++]);
        }

        /* Waiting for receive requests */
        do {
            MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            progress(arg);
        } while (!flag);

        if (my_rank == 0) {
            /* root sends "pong" to all ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Send(&dummy, 0, MPI_INT,
                         i /* dest */,
                         2 /* tag */,
                         MPI_COMM_WORLD);
            }
        }
    }
#pragma omp barrier
}
/*
 * Send an iovec to the peer rank over MPI. Each iov entry goes out as its
 * own message; the last entry carries tag 1 so the receiver knows where the
 * logical message ends (see mpi_rte_recv()).
 */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int group_size;
    int my_rank;
    int dest, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    for (dest = 0; dest < group_size; ++dest) {
        /* only the peer rank is addressed (in loopback: ourselves) */
        if (dest != rte_peer_index(group_size, my_rank)) {
            continue;
        }

        for (i = 0; i < iovcnt; ++i) {
            MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
                     i == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    /* dummy completion handle — this transport completes synchronously */
    *req = (void*)(uintptr_t)1;
}
/*
 * Receive one logical message from rank 'src', reassembling it from MPI
 * fragments; the sender marks the final fragment with tag 1
 * (see mpi_rte_post_vec()). Messages from any other rank are ignored.
 *
 * BUG FIX: 'buffer + offset' performed arithmetic on a void* pointer, which
 * is a GNU extension and invalid ISO C — cast to char* for portable
 * byte-wise addressing.
 */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
                         void *req)
{
    MPI_Status status;
    int my_rank, size;
    size_t offset;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (src != rte_peer_index(size, my_rank)) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max);
        MPI_Recv((char*)buffer + offset, max - offset, MPI_BYTE, src,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &count);
        offset += count;
    } while (status.MPI_TAG != 1);
}
/* Report callback for the MPI RTE: hand the result to the shared printer. */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, const char *extra_info, int is_final,
                           int is_multi_thread)
{
    struct perftest_context *ctx = (struct perftest_context*)arg;

    print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info,
                   ctx->flags, is_final, ctx->server_addr == NULL,
                   is_multi_thread);
}
#elif defined (HAVE_RTE)
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
/*
 * Barrier over the external RTE library. Only the OpenMP master thread calls
 * into the RTE; the surrounding 'omp barrier' pragmas hold the other local
 * threads until it completes.
 * NOTE(review): unlike the socket/MPI backends, the user 'progress' callback
 * is not invoked while waiting — confirm that is intentional.
 */
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp barrier
#pragma omp master
    {
        rte_group_t group = (rte_group_t)rte_group;
        int rc;

        rc = rte_barrier(group);
        if (RTE_SUCCESS != rc) {
            ucs_error("Failed to rte_barrier");
        }
    }
#pragma omp barrier
}
/*
 * Publish an iovec through the external RTE's srs service. A session handle
 * is stored in *req and is later consumed by ext_rte_exchange_vec() /
 * ext_rte_recv().
 *
 * BUG FIX: on calloc() failure the function used to return with *req left
 * uninitialized — after having already created a session (which leaked).
 * The staging vector is now allocated first, and *req is always written.
 */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;

    /* allocate the staging vector before creating the session so that a
     * failure here cannot leak a session */
    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        *req = NULL;
        return;
    }

    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_create");
    }

    /* describe each iov entry as a byte vector */
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type     = rte_datatype_uint8_t;
        r_vec[i].count    = iovec[i].iov_len;
    }

    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }
    *req = session;
    free(r_vec);
}
/*
 * Receive data posted by peer 'src' through the srs session in 'req': fetch
 * the raw buffer from the RTE, unpack up to 'max' bytes into 'buffer', then
 * destroy the session.
 */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;

    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return;
    }

    /* describe the destination as a single byte vector of capacity 'max' */
    r_vec.iov_base = buffer;
    r_vec.type = rte_datatype_uint8_t;
    r_vec.count = max;

    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);

    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    /* NOTE(review): assumes rte_srs_get_data transfers ownership of
     * rte_buffer to the caller — confirm against the RTE API. */
    free(rte_buffer);
}
/* Trigger the exchange of data previously posted on the srs session 'req'. */
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
    rte_srs_session_t session = (rte_srs_session_t)req;

    if (rte_srs_exchange_data(session) != RTE_SUCCESS) {
        ucs_error("Failed to rte_srs_exchange_data");
    }
}
/*
 * Report callback for the external RTE: forward to the shared printer.
 *
 * BUG FIX: the parameter order was (..., extra_info, arg, ...), which does
 * not match the other RTE backends (sock_rte_report / mpi_rte_report use
 * ..., arg, extra_info, ...). Since all three are invoked through the same
 * ucx_perf_rte_t.report function pointer, the swapped order made 'ctx'
 * receive the extra-info string. Parameters are now in the common order.
 * (Assumes the shared report callback typedef matches the other backends —
 * verify against the ucx_perf_rte_t declaration.)
 */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, const char *extra_info, int is_final,
                           int is_multi_thread)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info,
                   ctx->flags, is_final, ctx->server_addr == NULL,
                   is_multi_thread);
}
/* Run-time-environment vtable backed by the external RTE library. */
static ucx_perf_rte_t ext_rte = {
    .group_size = ext_rte_group_size,
    .group_index = ext_rte_group_index,
    .barrier = ext_rte_barrier,
    .report = ext_rte_report,
    .post_vec = ext_rte_post_vec,
    .recv = ext_rte_recv,
    .exchange_vec = ext_rte_exchange_vec,
};
#endif
/*
 * Set up the run-time environment over MPI (or the external RTE library).
 * For MPI: validates the process count (1 for loopback, 2 for p2p) and
 * registers the MPI vtable. The last rank is the one that prints results.
 *
 * BUG FIX (HAVE_RTE branch): removed leftover assignments to
 * ctx->params.rte_group / ctx->params.rte / ctx->params.report_arg — those
 * fields live under params.super, and '&mpi_rte' is undefined when only
 * HAVE_RTE is set, so the lines could not compile; they were also
 * immediately overwritten by the correct assignments below.
 */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
#if defined (HAVE_MPI)
    static ucx_perf_rte_t mpi_rte = {
        .group_size = mpi_rte_group_size,
        .group_index = mpi_rte_group_index,
        .barrier = mpi_rte_barrier,
        .post_vec = mpi_rte_post_vec,
        .recv = mpi_rte_recv,
        .exchange_vec = (void*)ucs_empty_function,
        .report = mpi_rte_report,
    };

    int size, rank;

    ucs_trace_func("");

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if ((ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) &&
        (size != 1)) {
        ucs_error("This test should be run with 1 process "
                  "in loopback case (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }

    if (!(ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) &&
        (size != 2)) {
        ucs_error("This test should be run with exactly 2 processes "
                  "in p2p case (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* Let the last rank print the results */
    if (rank == (size - 1)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.super.rte_group  = NULL;
    ctx->params.super.rte        = &mpi_rte;
    ctx->params.super.report_arg = ctx;
#elif defined (HAVE_RTE)
    rte_group_t group;

    ucs_trace_func("");

    rte_init(NULL, NULL, &group);
    /* Let the last rank print the results */
    if (rte_group_rank(group) == (rte_group_size(group) - 1)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.super.rte_group  = group;
    ctx->params.super.rte        = &ext_rte;
    ctx->params.super.report_arg = ctx;
#endif
    return UCS_OK;
}
/* Tear down the RTE created by setup_mpi_rte(). MPI itself is finalized in
 * main(), so only the external RTE needs explicit cleanup here. */
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#ifdef HAVE_RTE
    rte_finalize();
#endif
    return UCS_OK;
}
/*
 * Validate the CPU configuration and apply the requested affinity.
 * With TEST_FLAG_SET_AFFINITY: verify every requested CPU id is in range and
 * pin the process to that set. Otherwise: inspect the current affinity mask
 * and warn when the process is not pinned (benchmark results may be noisy).
 */
static ucs_status_t check_system(struct perftest_context *ctx)
{
    ucs_sys_cpuset_t cpuset;
    unsigned i, count, nr_cpus;
    int ret;

    ucs_trace_func("");

    ret = ucs_sys_get_num_cpus();
    if (ret < 0) {
        return UCS_ERR_INVALID_PARAM;
    }
    nr_cpus = ret;

    memset(&cpuset, 0, sizeof(cpuset));
    if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
        /* validate all requested CPU ids before touching the mask */
        for (i = 0; i < ctx->num_cpus; i++) {
            if (ctx->cpus[i] >= nr_cpus) {
                ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1);
                return UCS_ERR_INVALID_PARAM;
            }
        }

        for (i = 0; i < ctx->num_cpus; i++) {
            CPU_SET(ctx->cpus[i], &cpuset);
        }

        ret = ucs_sys_setaffinity(&cpuset);
        if (ret) {
            ucs_warn("sched_setaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }
    } else {
        ret = ucs_sys_getaffinity(&cpuset);
        if (ret) {
            ucs_warn("sched_getaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }

        /* count how many CPUs the process is allowed to run on */
        count = 0;
        for (i = 0; i < CPU_SETSIZE; ++i) {
            if (CPU_ISSET(i, &cpuset)) {
                ++count;
            }
        }

        /* a wide mask suggests the user did not pin the process */
        if (count > 2) {
            ucs_warn("CPU affinity is not set (bound to %u cpus)."
                     " Performance may be impacted.", count);
        }
    }

    return UCS_OK;
}
/*
 * Entry point: optionally initialize MPI, parse the command line, check and
 * apply CPU affinity, create the run-time environment (MPI/external RTE or
 * sockets), run the test, and tear everything down in reverse order.
 */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int mpi_rte; /* boolean: use the MPI/external RTE instead of sockets */
    int ret;

#ifdef HAVE_MPI
    int provided;

    /* !isatty(0) is used as a heuristic: under mpirun stdin is not a tty */
    mpi_initialized = !isatty(0) &&
                      /* Using MPI_THREAD_FUNNELED since ucx_perftest supports
                       * using multiple threads when only the main one makes
                       * MPI calls (which is also suitable for a single threaded
                       * run).
                       * MPI_THREAD_FUNNELED:
                       * The process may be multi-threaded, but only the main
                       * thread will make MPI calls (all MPI calls are funneled
                       * to the main thread). */
                      (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0);

    if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) {
        printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n",
               provided);
        ret = -1;
        goto out;
    }

#else
    mpi_initialized = 0;
#endif

    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        /* NOTE(review): assumes parse_opts() always initializes
         * ctx.params.super.msg_size_list before failing — confirm, since
         * the cleanup path below frees it. */
        goto out_msg_size_list;
    }

#ifdef __COVERITY__
    /* coverity[dont_call] */
    mpi_rte = rand(); /* Shut up deadcode error */
#endif

    if (ctx.mpi) {
        mpi_rte = 1;
    } else {
#ifdef HAVE_RTE
        mpi_rte = 1;
#else
        mpi_rte = 0;
#endif
    }

    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_msg_size_list;
    }

    /* Create RTE */
    status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_msg_size_list;
    }

    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }

    ret = 0;

out_cleanup_rte:
    (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out_msg_size_list:
    free(ctx.params.super.msg_size_list);
#if HAVE_MPI
out:
#endif
    if (mpi_initialized) {
#ifdef HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
|
helper.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "helper.h"
#include <omp.h>
//int map(const int i,const int j,const int n);
/*
 * Render the n x n interior of the (n+2)x(n+2) heat grid 'a' as red squares
 * in an SVG file named "heat_<rank>.svg". Cell intensity scales with the
 * cell value; cells <= 0 are skipped so the black background shows through.
 *
 * Fixes vs. the original: the self-assignment inside the 'rgb' initializer
 * ('int rgb = cond ? rgb = ... : 0.0;') was replaced by a plain conditional,
 * the filename is built with a bounds-checked snprintf, and fopen() failure
 * is now handled instead of dereferencing a NULL stream.
 */
void printarr(double *a, int n, int rank) {
    char name[32];
    snprintf(name, sizeof(name), "heat_%d.svg", rank);
    FILE *fp = fopen(name, "w");
    if (fp == NULL) {
        fprintf(stderr, "printarr: failed to open %s for writing\n", name);
        return;
    }
    const int size = 5; /* pixel size of one grid cell */
    fprintf(fp, "<html>\n<body>\n<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\">");
    /* black background covering the whole grid */
    fprintf(fp, "\n<rect x=\"0\" y=\"0\" width=\"%i\" height=\"%i\" style=\"stroke-width:1;fill:rgb(0,0,0);stroke:rgb(0,0,0)\"/>", size*n, size*n);
    for(int i=1; i<n+1; ++i)
        for(int j=1; j<n+1; ++j) {
            int rgb = (a[map(i,j,n+2)] > 0) ? (int)round(255.0*a[map(i,j,n+2)]) : 0;
            if(rgb>255) rgb=255; /* clamp saturated cells */
            if(rgb) fprintf(fp, "\n<rect x=\"%i\" y=\"%i\" width=\"%i\" height=\"%i\" style=\"stroke-width:1;fill:rgb(%i,0,0);stroke:rgb(%i,0,0)\"/>", size*(i-1), size*(j-1), size, size, rgb, rgb);
        }
    fprintf(fp, "</svg>\n</body>\n</html>");
    fclose(fp);
}
/* Sum the heat over the n x n interior of the (n+2)x(n+2) grid 'h_new'
 * (the boundary/ghost cells are excluded). */
double calculate_total_heat(double *h_new, int n)
{
    double total = 0.0;

    for (int row = 1; row <= n; ++row) {
        for (int col = 1; col <= n; ++col) {
            total += h_new[map(row, col, n + 2)];
        }
    }
    return total;
}
/*
 * OpenMP variant of calculate_total_heat(): sums the interior of the grid
 * using 'num_threads' threads with a sum reduction.
 *
 * BUG FIX: the parallel-for pragma was commented out, leaving 'num_threads'
 * unused and the function fully serial despite its name — the reduction is
 * re-enabled. (Without -fopenmp the pragma is ignored and the result is the
 * same serial sum, so behavior is safe either way.)
 */
double calculate_total_heat_omp(double *h_new, int n, int num_threads)
{
    double heat = 0.0; // total heat in system
    #pragma omp parallel for num_threads(num_threads) reduction(+:heat)
    for (int i = 1; i < n + 1; ++i)
    {
        for (int j = 1; j < n + 1; ++j)
        {
            heat += h_new[map(i, j, n+2)];
        }
    }
    return heat;
}
|
sparse.c | //
// sparse.c
//
// Created by Hussian Alamri on September 2012
//
#include "sparse.h"
/******* CCS functions *******/
/*
 * Build a compressed-column (CCS) representation of dense matrix 'm'.
 * Strategy: build the CRS form first, then convert — count entries per
 * column to form colPtr, record each entry's row index in CRS order, copy
 * the values, and finally sort everything into column-major order with
 * QuickSort keyed on the column indices.
 * NOTE(review): malloc() results are unchecked — confirm that ignoring
 * allocation failure is acceptable in this code base.
 */
CCS* CreateCCS(MATRIX *m) {
    CCS *c = malloc(sizeof(CCS));
    int i, j;
    CRS *r = CreateCRS(m);
    /* private copy of the CRS column indices: QuickSort permutes it */
    int *colInd = malloc(r->nnz * sizeof(int));
    for(i=0; i < r->nnz; ++i)
        colInd[i] = r->colInd[i];
    c->nrows = r->nrows;
    c->ncols = r->ncols;
    c->nnz = r->nnz;
    c->A = malloc(c->nnz * sizeof(double));
    c->colPtr = malloc((c->ncols + 1) * sizeof(int));
    c->rowInd = malloc(c->nnz * sizeof(int));
    /* histogram: number of nonzeros in each column, shifted by one slot */
    for (i=0; i <= c->ncols; ++i)
        c->colPtr[i] = 0;
    for (i=0; i < c->nnz; ++i)
        c->colPtr[r->colInd[i] + 1] += 1;
    /* prefix sum turns the counts into column start offsets */
    for (i=0; i < c->ncols; ++i)
        c->colPtr[i+1] += c->colPtr[i];
    /* row index of every entry, still in CRS (row-major) order */
    for (i=0; i < c->nrows; ++i)
        for (j = r->rowPtr[i]; j < r->rowPtr[i+1]; ++j)
            c->rowInd[j] = i;
    for (i=0; i < c->nnz; ++i)
        c->A[i] = r->A[i];
    /* sort values/rowInd by column index to reach column-major order */
    QuickSort(c->A, c->rowInd, colInd, 0, c->nnz);
    DestroyCRS(r);
    free(colInd);
    return c;
}
/* Dump a CCS matrix: dimensions, (value,row) pairs, then the column pointer. */
void PrintCCS(CCS *c) {
    int idx;

    printf("Header\n");
    printf("%u ", (int)c->nrows);
    printf("%u ", (int)c->ncols);
    printf("%u\n", (int)c->nnz);

    printf("Values - Row\n");
    for (idx = 0; idx < c->nnz; ++idx) {
        printf("%f\t", (double)c->A[idx]);
        printf("%u\n", (int)c->rowInd[idx]);
    }

    printf("Column Pointer (colPtr)\n");
    for (idx = 0; idx <= c->ncols; ++idx)
        printf("%u ", (int)c->colPtr[idx]);
    printf("\n");
}
/*
 * Sparse matrix-vector product r = A*v with A in CCS form; 'r' must have
 * room for nrows doubles.
 *
 * BUG FIXES vs. the original:
 *  - removed the stray 'r[rowInd[j]] = temp;' after the inner loop: at that
 *    point j == colPtr[i+1] (equal to nnz for the last column, i.e. an
 *    out-of-bounds read of rowInd) and the store clobbered the accumulated
 *    result for whatever row it happened to hit;
 *  - the result is zeroed over nrows entries, not ncols — r is indexed by
 *    row, so the old memset was wrong for non-square matrices;
 *  - concurrent columns scatter into the same r[] entries, so the update is
 *    now atomic, fixing a data race under OpenMP.
 */
void MultiplyCCS(CCS *c, double *v, double* r) {
    int i, j;
    int* rowInd = c->rowInd;
    int nrows = c->nrows;
    int* colPtr = c->colPtr;
    int ncols = c->ncols;
    double *A = c->A;

    memset(r, 0, nrows*sizeof(double));
    #pragma omp parallel for default(shared) private(i, j)
    for (i=0; i < ncols; ++i) {
        double x = v[i]; /* hoist the column's multiplier */
        for (j = colPtr[i]; j < colPtr[i+1]; ++j) {
            #pragma omp atomic
            r[rowInd[j]] += A[j] * x;
        }
    }
}
/* Release all storage owned by a CCS matrix, including the struct itself. */
void DestroyCCS(CCS *c) {
    free(c->A);
    free(c->rowInd);
    free(c->colPtr);
    free(c);
}
/******* CRS functions *******/
/*
 * Build a compressed-row (CRS) representation of dense matrix 'm'.
 * Scans the dense element array row by row, storing each nonzero in A with
 * its column index in colInd; rowPtr[k+1] holds the running nonzero count
 * after row k, so row k's entries live in [rowPtr[k], rowPtr[k+1]).
 * NOTE(review): assumes m->nnz matches the actual number of nonzeros in
 * m->mel, and malloc() results are unchecked — confirm both are acceptable.
 */
CRS* CreateCRS(MATRIX *m) {
    int i, k, j, index;
    int nrows = m->nrows;
    int ncols = m->ncols;
    int nnz = m->nnz;
    double** mal = m->mel;
    CRS *cc = (CRS*)malloc(sizeof(CRS));
    int* colInd = malloc(nnz * sizeof(int));
    int* rowPtr = malloc((nrows+1) * sizeof(int));
    double* A = malloc(nnz * sizeof(double));
    for(i=0; i<nrows+1; ++i) {
        rowPtr[i] = 0;
    }
    for (i=0; i<nnz; ++i) {
        A[i] = 0;
        colInd[i] = 0;
    }
    /* gather nonzeros in row-major order */
    index = 0;
    for(k=0; k<nrows; k++){
        for(j=0; j<ncols; j++){
            if(mal[k][j] != 0){
                A[index] = mal[k][j];
                colInd[index] = j;
                index++;
            }
        }
        rowPtr[k+1] = index; /* row k ends at offset 'index' */
    }
    cc->colInd = colInd;
    cc->rowPtr = rowPtr;
    cc->A = A;
    cc->nrows = nrows;
    cc->ncols = ncols;
    cc->nnz = nnz;
    return cc;
}
/* Dump a CRS matrix: dimensions, (value,column) pairs, then the row pointer. */
void PrintCRS(CRS *c) {
    int idx;

    printf("Header\n");
    printf("%u ", (int)c->nrows);
    printf("%u ", (int)c->ncols);
    printf("%u\n", (int)c->nnz);

    printf("Values - Column\n");
    for (idx = 0; idx < c->nnz; ++idx) {
        printf("%f\t", (double)c->A[idx]);
        printf("%u\n", (int)c->colInd[idx]);
    }

    printf("Row Pointer\n");
    for (idx = 0; idx <= c->nrows; ++idx)
        printf("%u ", (int)c->rowPtr[idx]);
    printf("\n");
}
/*
 * Sparse matrix-vector product r = A*v with A in CRS form; 'r' must have
 * room for nrows doubles. Rows are independent, so the outer loop
 * parallelizes cleanly with per-row accumulators.
 *
 * BUG FIX: the old 'memset(r, 0, ncols*sizeof(double))' zeroed ncols
 * entries of an nrows-length output — an out-of-bounds write whenever
 * ncols > nrows. It was also redundant, since every r[i] is assigned below;
 * the memset and the unused 'temp'/'nnz' locals were removed.
 */
void MultiplyCRS(CRS *c, double *v, double* r) {
    int i, j;
    int *rowPtr = c->rowPtr;
    int *colInd = c->colInd;
    int nrows = c->nrows;
    double *A = c->A;

    #pragma omp parallel for default(shared) private(i, j)
    for (i=0; i < nrows; ++i) {
        double t = 0.0; /* per-row accumulator (thread-private) */
        #pragma ivdep
        for (j = rowPtr[i]; j < rowPtr[i+1]; ++j) {
            t += A[j] * v[colInd[j]];
        }
        r[i] = t;
    }
}
/* Release all storage owned by a CRS matrix, including the struct itself. */
void DestroyCRS(CRS *c) {
    free(c->A);
    free(c->rowPtr);
    free(c->colInd);
    free(c);
}
|
thd_info.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "thd_info.h"
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Perform a parallel SUM reduction.
*
* @param thds The thread structure we are using in the reduction.
* @param scratchid Which scratch array to reduce.
* @param nelems How many elements in the scratch array.
*/
static inline void p_reduce_sum(
    thd_info * const thds,
    idx_t const scratchid,
    idx_t const nelems)
{
    int const tid = splatt_omp_get_thread_num();
    int const nthreads = splatt_omp_get_num_threads();

    val_t * const myvals = (val_t *) thds[tid].scratch[scratchid];

    /* Tree reduction: each round, the lower half of the threads adds in the
     * scratch array of its partner 'half' positions above; the result
     * accumulates into thread 0's scratch. */
    int half = nthreads / 2;
    while(half > 0) {
        if(tid < half && tid + half < nthreads) {
            val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] += target[i];
            }
        }

        #pragma omp barrier

        /* check for odd number */
        #pragma omp master
        if(half > 1 && half % 2 == 1) {
            /* fold the odd leftover slot into the master's (thread 0) scratch */
            val_t const * const last = (val_t *) thds[half-1].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] += last[i];
            }
        }

        /* next iteration */
        half /= 2;
    }

    /* account for odd thread at end */
    #pragma omp master
    {
        if(nthreads % 2 == 1) {
            val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] += last[i];
            }
        }
    }

    /* final barrier publishes the reduced result before anyone reads it */
    #pragma omp barrier
}
/**
* @brief Perform a parallel MAX reduction.
*
* @param thds The thread structure we are using in the reduction.
* @param scratchid Which scratch array to reduce.
* @param nelems How many elements in the scratch array.
*/
static inline void p_reduce_max(
    thd_info * const thds,
    idx_t const scratchid,
    idx_t const nelems)
{
    int const tid = splatt_omp_get_thread_num();
    int const nthreads = splatt_omp_get_num_threads();

    val_t * const myvals = (val_t *) thds[tid].scratch[scratchid];

    /* Tree reduction (element-wise max): mirrors p_reduce_sum() but combines
     * with SS_MAX instead of addition; the result lands in thread 0's scratch. */
    int half = nthreads / 2;
    while(half > 0) {
        if(tid < half && tid + half < nthreads) {
            val_t const * const target = (val_t *) thds[tid+half].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] = SS_MAX(myvals[i], target[i]);
            }
        }

        #pragma omp barrier

        /* check for odd number */
        #pragma omp master
        if(half > 1 && half % 2 == 1) {
            /* fold the odd leftover slot into the master's (thread 0) scratch */
            val_t const * const last = (val_t *) thds[half-1].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] = SS_MAX(myvals[i], last[i]);
            }
        }

        /* next iteration */
        half /= 2;
    }

    /* account for odd thread at end */
    #pragma omp master
    {
        if(nthreads % 2 == 1) {
            val_t const * const last = (val_t *) thds[nthreads-1].scratch[scratchid];
            for(idx_t i=0; i < nelems; ++i) {
                myvals[i] = SS_MAX(myvals[i], last[i]);
            }
        }
    }

    /* final barrier publishes the reduced result before anyone reads it */
    #pragma omp barrier
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Dispatch a parallel reduction over the given scratch arrays; must be
 * called from within a parallel region by every thread. */
void thd_reduce(
    thd_info * const thds,
    idx_t const scratchid,
    idx_t const nelems,
    splatt_reduce_type const which)
{
    /* nothing to combine when running single-threaded */
    if(splatt_omp_get_num_threads() == 1) {
        return;
    }

    /* just to be safe in case any thread data is being copied */
    #pragma omp barrier

    if(which == REDUCE_SUM) {
        p_reduce_sum(thds, scratchid, nelems);
    } else if(which == REDUCE_MAX) {
        p_reduce_max(thds, scratchid, nelems);
    } else {
        fprintf(stderr, "SPLATT: thd_reduce supports SUM and MAX only.\n");
        abort();
    }
}
/*
 * Allocate and initialize 'nthreads' thread workspaces. The variadic tail
 * supplies 'nscratch' byte counts, one per scratch array; every thread gets
 * its own zero-filled allocation of each size. Free with thd_free().
 */
thd_info * thd_init(
    idx_t const nthreads,
    idx_t const nscratch,
    ...)
{
    thd_info * thds = (thd_info *) splatt_malloc(nthreads * sizeof(thd_info));

    for(idx_t t=0; t < nthreads; ++t) {
        timer_reset(&thds[t].ttime);
        thds[t].nscratch = nscratch;
        thds[t].scratch = (void **) splatt_malloc(nscratch * sizeof(void*));
    }

    va_list args;
    va_start(args, nscratch);
    for(idx_t s=0; s < nscratch; ++s) {
        /* NOTE(review): va_arg requires callers to pass the sizes as idx_t;
         * narrower integer arguments would be undefined — confirm call sites. */
        idx_t const bytes = va_arg(args, idx_t);
        for(idx_t t=0; t < nthreads; ++t) {
            thds[t].scratch[s] = (void *) splatt_malloc(bytes);
            memset(thds[t].scratch[s], 0, bytes);
        }
    }
    va_end(args);

    return thds;
}
/* Print each thread's accumulated timer value. */
void thd_times(
    thd_info * thds,
    idx_t const nthreads)
{
    idx_t t;
    for(t = 0; t < nthreads; ++t) {
        printf(" thread: %"SPLATT_PF_IDX" %0.3fs\n", t, thds[t].ttime.seconds);
    }
}
/*
 * Print the average and maximum per-thread time plus the relative load
 * imbalance, (max - avg) / max, as a percentage.
 *
 * BUG FIX: when max_time is 0 (timers never ran) the imbalance division
 * produced NaN; it is now reported as 0%.
 */
void thd_time_stats(
    thd_info * thds,
    idx_t const nthreads)
{
    double max_time = 0.;
    double avg_time = 0.;
    for(idx_t t=0; t < nthreads; ++t) {
        avg_time += thds[t].ttime.seconds;
        max_time = SS_MAX(max_time, thds[t].ttime.seconds);
    }
    avg_time /= nthreads;

    double const imbal = (max_time > 0.) ? (max_time - avg_time) / max_time : 0.;
    printf(" avg: %0.3fs max: %0.3fs (%0.1f%% imbalance)\n",
        avg_time, max_time, 100. * imbal);
}
/* Reset every thread's accumulated timer. */
void thd_reset(
    thd_info * thds,
    idx_t const nthreads)
{
    idx_t t = 0;
    while(t < nthreads) {
        timer_reset(&thds[t].ttime);
        ++t;
    }
}
/* Free every thread's scratch buffers, then the thread array itself. */
void thd_free(
    thd_info * thds,
    idx_t const nthreads)
{
    for(idx_t t=0; t < nthreads; ++t) {
        thd_info * const thd = thds + t;
        for(idx_t s=0; s < thd->nscratch; ++s) {
            free(thd->scratch[s]);
        }
        free(thd->scratch);
    }
    free(thds);
}
|
pi-omp3.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise7.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 8
*
* Pi calculation
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <omp.h>
#include "utils.h"
/**
* @brief EX 8- Pi Calculation
*
* This program computes pi as
* \pi = 4 arctan(1)
* = 4 \int _0 ^1 \frac{1} {1 + x^2} dx
*
* @return void
*/
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
#if !defined(ITERS)
#define ITERS (4)
#endif
#define NSTEPS 134217728
/*
 * Approximate pi as the midpoint-rule integral of 4/(1+x^2) over [0,1],
 * parallelized with an OpenMP sum reduction, then print the runtime and the
 * absolute error against 4*atan(1).
 */
void exercise(){
    double step = 1.0 / NSTEPS;
    double pi = 0.0;
    long idx;

    double t0 = omp_get_wtime();
    #pragma omp parallel for reduction(+:pi)
    for (idx = 0; idx < NSTEPS; idx++)
    {
        double mid = (idx + 0.5) * step; /* midpoint of sub-interval idx */
        pi += 1.0 / (1.0 + mid * mid);
    }
    pi *= 4.0 * step;
    double elapsed = omp_get_wtime() - t0;

    double ref_pi = 4.0 * atan(1.0);
    printf("pi with %d steps is %.10f in %.6f seconds (error=%e)\n",
           NSTEPS, pi, elapsed, fabs(ref_pi - pi));
}
/* Run the pi benchmark ITERS times, collecting stats per iteration, and
 * print the aggregate statistics at the end. */
int
main(int argc, char** argv)
{
    int iter;

    for (iter = 0; iter < ITERS; iter++) {
        printf("\n\n");
        printf("============================\n");
        printf("Test - Iteration %d...\n", iter);
        printf("============================\n");
        start_stats();
        exercise();
        collect_stats();
    }

    printf("\n\n");
    printf("============================\n");
    printf("Statistics\n");
    printf("============================\n");
    print_stats();
    return 0;
}
|
GB_unop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint8_uint8)
// op(A') function: GB (_unop_tran__minv_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 8) for all anz entries.
// Cx and Ax may be aliased (in-place apply).  If Ab is non-NULL, A is held
// in bitmap form and entries with Ab [p] == 0 are skipped.  Work is split
// statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_unop_apply__minv_uint8_uint8)
(
    uint8_t *Cx,                 // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: apply the op to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;    // cast uint8_t -> uint8_t (no-op)
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // entry not present in the bitmap
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all unary operators and is pulled in
// textually from GB_unop_transpose.c; it is specialized to this op/type pair
// via the GB_* macros defined earlier in this file.  Workspaces/A_slice carry
// the parallel slicing computed by the caller.
GrB_Info GB (_unop_tran__minv_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
reduce_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GraphBLAS.h"
#if defined ( _OPENMP )
#include <omp.h>
#endif
// #define N 65536
#define N 16384
// Demo: build a dense N-by-N INT64 matrix from triplets, then reduce it to a
// scalar with the PLUS monoid at 1..nthreads_max threads, reporting speedup.
int main (void)
{
    #if defined ( _OPENMP )
    double t0 = omp_get_wtime ( ) ;
    #endif

    // start GraphBLAS
    GrB_init (GrB_NONBLOCKING) ;
    int nthreads ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads) ;
    printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ;
    int nthreads_max ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads_max) ;
    printf ("# of threads: %d\n", nthreads_max) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("GPU warmup time: %g\n", t0) ;
    t0 = omp_get_wtime ( ) ;
    #endif

    GrB_Index nrows = N ;
    GrB_Index ncols = N ;
    GrB_Matrix A ;
    GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;

    // triplet arrays: at N=16384 each is N*N*8 bytes (~2 GB), so a failed
    // malloc is a real possibility — check before use instead of crashing
    GrB_Index *I = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    GrB_Index *J = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    int64_t   *X = (int64_t   *) malloc (nrows * ncols * sizeof (int64_t)) ;
    if (I == NULL || J == NULL || X == NULL)
    {
        fprintf (stderr, "out of memory\n") ;
        free (I) ;
        free (J) ;
        free (X) ;
        GrB_Matrix_free (&A) ;
        GrB_finalize ( ) ;
        return (1) ;
    }

    // fill the triplets: entry (i,j) = (i*N+j) & 0xFF, deterministic
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads_max) schedule(static)
    for (k = 0 ; k < N*N ; k++)
    {
        // k = i * N + j ;
        int64_t i = k / N ;
        int64_t j = k % N ;
        // int x = (int) (rand ( ) & 0xFF) ;
        int x = (int) (k & 0xFF) ;
        I [k] = i ;
        J [k] = j ;
        X [k] = x ;
    }

    GrB_Index nvals = N*N ;
    GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ;
    free (I) ;
    free (J) ;
    free (X) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("time to create matrix: %g\n", t0) ;
    #endif

    GrB_Index result ;
    double t1 = 0 ;     // single-thread baseline time, set on first iteration
    printf ("\nreduce to a scalar:\n") ;

    for (int nthreads = 1 ; nthreads <= nthreads_max ; nthreads++)
    {
        GxB_Global_Option_set (GxB_GLOBAL_NTHREADS, nthreads) ;
        #if defined ( _OPENMP )
        double t = omp_get_wtime ( ) ;
        #endif
        // result is typecast from the INT64 reduction to GrB_Index (uint64)
        GrB_Matrix_reduce_UINT64 (&result, NULL, GrB_PLUS_MONOID_INT64,
            A, NULL) ;
        #if defined ( _OPENMP )
        t = omp_get_wtime ( ) - t ;
        if (nthreads == 1) t1 = t ;
        printf ("nthreads %3d time: %12.6f speedup %8.2f\n",
            nthreads, t, t1/t) ;
        #endif
    }

    printf ("result %" PRId64 "\n", result) ;

    // free everything
    GrB_Matrix_free (&A) ;
    GrB_finalize ( ) ;
}
|
mkldnn_batch_norm-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_batch_norm.cc
* \brief
* \author Tao Lv
*/
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <vector>
#include <utility>
#include <mkldnn.hpp>
#include "../batch_norm-inl.h"
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
typedef mkldnn::batch_normalization_forward::primitive_desc t_bn_f_pdesc;
typedef mkldnn::batch_normalization_forward::desc t_bn_f_desc;
typedef mkldnn::batch_normalization_backward::primitive_desc t_bn_b_pdesc;
typedef mkldnn::batch_normalization_backward::desc t_bn_b_desc;
using mkldnn::use_global_stats;
using mkldnn::use_scale_shift;
using mkldnn::forward_training;
using mkldnn::forward_inference;
// Derive the mkldnn batch-norm flag mask from the call shape: three input
// arrays (data, gamma, beta) enable scale/shift; having both saved stats
// (mean, variance) while not training enables use of global statistics.
inline static unsigned _GetFlags(const std::vector<NDArray> &in_data,
                                 const std::vector<NDArray> &aux_states,
                                 const BatchNormParam &param, bool is_train) {
  unsigned flags = 0U;
  if (in_data.size() == 3U)
    flags |= use_scale_shift;
  // aux_states[0]: inMean, aux_states[1]: inVariance
  if (aux_states.size() == 2U && !is_train)
    flags |= use_global_stats;
  return flags;
}
// Build the forward batch-norm primitive descriptor for the given input
// memory, choosing the training or inference propagation kind.
template <typename DType>
inline static t_bn_f_pdesc _GetFwd(const mkldnn::memory &data_mem,
                                   bool is_train,
                                   DType eps,
                                   unsigned flags) {
  auto data_md = data_mem.get_primitive_desc().desc();
  auto engine = CpuEngine::Get()->get_engine();
  auto kind = is_train ? forward_training : forward_inference;
  t_bn_f_desc bnFwd_desc(kind, data_md, eps, flags);
  return t_bn_f_pdesc(bnFwd_desc, engine);
}
// Build the backward batch-norm primitive descriptor.  mkldnn requires the
// matching forward descriptor as a hint, so one is constructed here with
// is_train == true.
template <typename DType>
inline static t_bn_b_pdesc _GetBwd(const mkldnn::memory &data_mem,
                                   const mkldnn::memory &diff_mem,
                                   DType eps,
                                   unsigned flags) {
  auto data_md = data_mem.get_primitive_desc().desc();
  auto diff_md = diff_mem.get_primitive_desc().desc();
  auto engine = CpuEngine::Get()->get_engine();
  t_bn_b_desc bnBwd_desc(mkldnn::prop_kind::backward, diff_md, data_md, eps, flags);
  return t_bn_b_pdesc(bnBwd_desc, engine, _GetFwd(data_mem, true, eps, flags));
}
// Cache key: batch-norm parameters plus is_train flag plus input signature.
typedef ParamOpSign<BatchNormParam> MKLDNNBNSignature;

// Holds a cached mkldnn batch-normalization forward primitive together with
// the mkldnn::memory objects bound to it.  SetDataHandle() re-points those
// memory objects at new buffers, so the same primitive can be replayed on
// fresh data without being re-created.
class MKLDNNBNForward {
  std::shared_ptr<const mkldnn::memory> data_m;    // input data
  std::shared_ptr<const mkldnn::memory> weight_m;  // packed [gamma..., beta...]
  std::shared_ptr<const mkldnn::memory> out_m;     // output
  std::shared_ptr<const mkldnn::memory> mean_m;    // per-channel mean
  std::shared_ptr<const mkldnn::memory> var_m;     // per-channel variance
  std::shared_ptr<mkldnn::batch_normalization_forward> fwd;
  bool is_train;
  t_bn_f_pdesc pd;  // forward primitive descriptor this object was built for
 public:
  // The weights buffer is owned by this object; data/mean/var/out memories
  // are bound lazily on the first SetDataHandle() call.
  MKLDNNBNForward(const t_bn_f_pdesc &_pd, bool is_train): pd(_pd) {
    weight_m.reset(new mkldnn::memory(pd.weights_primitive_desc()));
    this->is_train = is_train;
  }
  // Packed scale/shift buffer; caller fills [0,C) with gamma, [C,2C) with beta.
  const mkldnn::memory &GetWeight() const {
    return *weight_m;
  }
  const t_bn_f_pdesc &GetPd() const {
    return pd;
  }
  // Valid only after SetDataHandle() has been called at least once.
  const mkldnn::memory &GetMean() const {
    return *mean_m;
  }
  const mkldnn::memory &GetVar() const {
    return *var_m;
  }
  // Bind (or re-bind) the primitive's buffers to the given arrays.  The
  // primitive itself is constructed on the first call; the argument order
  // differs between inference (mean/var are inputs) and training (mean/var
  // are outputs).
  void SetDataHandle(const NDArray &data, const NDArray &mean,
                     const NDArray &var, const mkldnn::memory &out) {
    auto _data = data.GetMKLDNNData();
    if (data_m) {
      data_m->set_data_handle(_data->get_data_handle());
    } else {
      data_m.reset(new mkldnn::memory(_data->get_primitive_desc(),
                                      _data->get_data_handle()));
    }
    if (out_m) {
      out_m->set_data_handle(out.get_data_handle());
    } else {
      out_m.reset(new mkldnn::memory(out.get_primitive_desc(),
                                     out.get_data_handle()));
    }
    auto mean_ptr = mean.data().dptr_;
    if (mean_m) {
      mean_m->set_data_handle(mean_ptr);
    } else {
      mean_m.reset(new mkldnn::memory(pd.mean_primitive_desc(),
                                      mean_ptr));
    }
    auto var_ptr = var.data().dptr_;
    if (var_m) {
      var_m->set_data_handle(var_ptr);
    } else {
      var_m.reset(new mkldnn::memory(pd.variance_primitive_desc(),
                                     var_ptr));
    }
    if (fwd == nullptr) {
      if (!is_train)
        // inference: mean/var are inputs to the primitive
        fwd.reset(new mkldnn::batch_normalization_forward(
                pd, *data_m, mkldnn::primitive::at(*mean_m),
                mkldnn::primitive::at(*var_m), *weight_m, *out_m));
      else
        // training: mean/var are computed by the primitive (outputs)
        fwd.reset(new mkldnn::batch_normalization_forward(
                pd, mkldnn::primitive::at(*data_m),
                mkldnn::primitive::at(*weight_m), *out_m,
                *mean_m, *var_m));
    }
  }
  const mkldnn::batch_normalization_forward &GetFwd() const {
    return *fwd;
  }
};
// Look up (or lazily create) the cached forward primitive wrapper for this
// (param, is_train, input) combination.
template<typename DType>
static MKLDNNBNForward &GetBNForward(const BatchNormParam& param,
                                     const OpContext &ctx, const NDArray &in_data,
                                     unsigned flags) {
  // One cache per thread: entries are keyed on param + train mode + input.
  static thread_local std::unordered_map<MKLDNNBNSignature, MKLDNNBNForward, OpHash> fwds;

  MKLDNNBNSignature key(param);
  key.AddSign(ctx.is_train);
  key.AddSign(in_data);

  auto it = fwds.find(key);
  if (it != fwds.end())
    return it->second;

  // Cache miss: build the primitive descriptor and insert a new wrapper.
  auto fwd_pd = _GetFwd(*in_data.GetMKLDNNData(), ctx.is_train,
                        (DType) param.eps, flags);
  auto ins_ret = fwds.insert(std::make_pair(key, MKLDNNBNForward(fwd_pd, ctx.is_train)));
  CHECK(ins_ret.second);
  return ins_ret.first->second;
}
// Run mkldnn batch-norm forward.
//   in_data:    [data, gamma, beta]
//   out_data:   [out, mean, var] — note: the "var" output actually stores the
//               inverse std (see VARIANCE_TO_INVSTD below)
//   aux_states: [moving_mean, moving_var]
// Scale/shift is always expected (flags & use_scale_shift); with fix_gamma
// all scale entries are forced to 1.0f.
template <typename DType>
void MKLDNNBatchNormForward(const OpContext &ctx, const BatchNormParam &param,
                            const std::vector<NDArray> &in_data,
                            const std::vector<OpReqType> &req,
                            const std::vector<NDArray> &out_data,
                            const std::vector<NDArray> &aux_states) {
  TmpMemMgr::Get()->Init(ctx.requested[batchnorm::kTempSpace]);
  unsigned flags = _GetFlags(in_data, aux_states, param, ctx.is_train);
  const NDArray &data = in_data[batchnorm::kData];

  auto &fwd = GetBNForward<DType>(param, ctx, data, flags);
  const NDArray &out = out_data[batchnorm::kOut];

  // for output memory
  auto out_mem = const_cast<NDArray &>(out).CreateMKLDNNData(fwd.GetPd().dst_primitive_desc());

  // mxnet will always use scale shift.
  // But if fix_gamma is true, then all scale elements will be set to 1.0f
  if (flags & use_scale_shift) {
    const NDArray &gamma    = in_data[batchnorm::kGamma];
    const NDArray &beta     = in_data[batchnorm::kBeta];
    CHECK_EQ(gamma.storage_type(), mxnet::kDefaultStorage);
    CHECK_EQ(beta.storage_type(), mxnet::kDefaultStorage);

    // mkldnn packs scale and shift into one buffer: [gamma_0..gamma_{C-1},
    // beta_0..beta_{C-1}]
    const mkldnn::memory &weight_mem = fwd.GetWeight();
    DType* weight_buf = reinterpret_cast<DType *>(weight_mem.get_data_handle());

    nnvm::dim_t channels_ = data.shape()[1];
    CHECK(weight_mem.get_primitive_desc().get_size() == channels_ * sizeof(DType) * 2);
    DType* weight_ptr = gamma.data().dptr<DType>();
    DType* bias_ptr = beta.data().dptr<DType>();
    if (!param.fix_gamma) {
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = weight_ptr[i];
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    } else if (IsBNWriting(req[batchnorm::kGamma])) {
      // fix_gamma with a writable gamma: force scale to 1 in both the packed
      // buffer and the gamma input array itself
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = (DType)1.0f;
        weight_ptr[i] = (DType)1.0f;
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    } else {
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = (DType)1.0f;
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    }

    if (!ctx.is_train) {
      // inference: feed the moving statistics to the primitive and copy them
      // to the outputs, converting variance to inverse std
      DType* omean    = out_data[batchnorm::kMean].data().dptr<DType>();
      DType* ovar     = out_data[batchnorm::kVar].data().dptr<DType>();
      DType* inmean   = aux_states[batchnorm::kMovingMean].data().dptr<DType>();
      DType* invar    = aux_states[batchnorm::kMovingVar].data().dptr<DType>();
      // to align with origin implmentation: batch_norm.cc: L164
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        omean[i] = inmean[i];
        ovar[i] = VARIANCE_TO_INVSTD(invar[i], param.eps);
      }

      fwd.SetDataHandle(data, aux_states[batchnorm::kMovingMean],
                        aux_states[batchnorm::kMovingVar],
                        *out_mem);
      MKLDNNStream::Get()->RegisterPrim(fwd.GetFwd());
      MKLDNNStream::Get()->Submit();
    } else {  // training
      // training: the primitive writes batch mean/var into outMean/outVar;
      // convert var to inverse std after the primitive has run
      const NDArray &outMean  = out_data[batchnorm::kMean];
      const NDArray &outVar   = out_data[batchnorm::kVar];
      DType* omean    = outMean.data().dptr<DType>();
      DType* ovar     = outVar.data().dptr<DType>();

      fwd.SetDataHandle(data, outMean, outVar, *out_mem);
      MKLDNNStream::Get()->RegisterPrim(fwd.GetFwd());
      MKLDNNStream::Get()->Submit();

      DType* mean_mem_ptr = reinterpret_cast<DType*>(fwd.GetMean().get_data_handle());
      DType* var_mem_ptr  = reinterpret_cast<DType*>(fwd.GetVar().get_data_handle());
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        omean[i] = mean_mem_ptr[i];
        ovar[i] = VARIANCE_TO_INVSTD(var_mem_ptr[i], param.eps);
      }
    }
  } else {  // no input gamma and beta
    LOG(FATAL) << "MKLDNN batch normalization: should not reach here ...";
  }
}
// Run mkldnn batch-norm backward: computes the data gradient (in_grad[0])
// and the gamma/beta gradients (in_grad[1], in_grad[2]).  Also updates the
// moving mean/variance in aux_states when training without global stats.
// Note: out_data[kVar] holds inverse std (see forward), so it is converted
// back to variance via INVSTD_TO_VARIANCE before being fed to mkldnn.
template <typename DType>
void MKLDNNBatchNormBackward(const OpContext &ctx, const BatchNormParam &param,
                             const std::vector<NDArray> &out_grad,
                             const std::vector<NDArray> &in_data,
                             const std::vector<NDArray> &out_data,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &in_grad,
                             const std::vector<NDArray> &aux_states) {
  TmpMemMgr::Get()->Init(ctx.requested[batchnorm::kTempSpace]);
  CHECK_EQ(out_grad.size(), 1U);
  CHECK_EQ(in_data.size(), 3U);
  CHECK_EQ(out_data.size(), 3U);
  CHECK_EQ(in_grad.size(), 3U);
  unsigned flags = _GetFlags(in_data, aux_states, param, ctx.is_train);

  const NDArray &data         = in_data[batchnorm::kData];
  const NDArray &diff         = out_grad[batchnorm::kOut];
  const NDArray &gradIn       = in_grad[batchnorm::kData];
  const NDArray &moving_mean  = aux_states[batchnorm::kMovingMean];
  const NDArray &moving_var   = aux_states[batchnorm::kMovingVar];
  const NDArray &out_mean     = out_data[batchnorm::kMean];
  const NDArray &out_var      = out_data[batchnorm::kVar];

  CHECK(out_mean.IsDefaultData());
  CHECK(out_var.IsDefaultData());
  CHECK(moving_mean.IsDefaultData());
  CHECK(moving_var.IsDefaultData());

  auto data_mem  = data.GetMKLDNNData();
  auto diff_mem  = diff.GetMKLDNNData();
  // MKLDNN batchnorm should run on special layouts. If one of them isn't, we
  // should reorder them.
  if (data.IsDefaultData())
    data_mem = data.GetMKLDNNDataReorder(diff_mem->get_primitive_desc());
  else if (diff.IsDefaultData())
    diff_mem = diff.GetMKLDNNDataReorder(data_mem->get_primitive_desc());
  auto bwd_pd = _GetBwd(*data_mem, *diff_mem, param.eps, flags);
  auto gradi_mem = const_cast<NDArray &>(gradIn).CreateMKLDNNData(data_mem->get_primitive_desc());

  if (flags & use_scale_shift) {
    const NDArray &gamma    = in_data[batchnorm::kGamma];
    const NDArray &beta     = in_data[batchnorm::kBeta];
    // TODO(tao): how to reuse this memory?
    // packed weights buffer: [gamma..., beta...] (see forward)
    std::shared_ptr<const mkldnn::memory> weight_mem(
        new mkldnn::memory(bwd_pd.weights_primitive_desc()));

    DType* weight_buf = reinterpret_cast<DType *>(weight_mem->get_data_handle());
    nnvm::dim_t channels_ = data.shape()[1];
    for (int i = 0; i < channels_; i++) {
      if (!param.fix_gamma)
        weight_buf[i] = (gamma.data().dptr<DType>())[i];   // weight
      else
        weight_buf[i] = (DType)1.0f;
    }

    for (int i = 0; i < channels_; i++) {
      weight_buf[channels_ + i] = (beta.data().dptr<DType>())[i];  // bias
    }

    // gradient w.r.t. the packed weights, computed by the primitive
    std::shared_ptr<const mkldnn::memory> gradw_mem(
        new mkldnn::memory(bwd_pd.diff_weights_primitive_desc()));
    // training but no input mean and variance
    if (ctx.is_train && !param.use_global_stats) {
      DType* moving_mean_ptr  = reinterpret_cast<DType *>(moving_mean.data().dptr<DType>());
      DType* moving_var_ptr   = reinterpret_cast<DType *>(moving_var.data().dptr<DType>());
      DType* out_mean_ptr     = reinterpret_cast<DType *>(out_mean.data().dptr<DType>());
      DType* out_var_ptr      = reinterpret_cast<DType *>(out_var.data().dptr<DType>());
      // temporary buffer for the true variance (out_var holds inverse std)
      mkldnn::memory var_mem(bwd_pd.variance_primitive_desc());
      DType *tmp_var_ptr = reinterpret_cast<DType *>(var_mem.get_data_handle());

      // exponential moving-average update of the running statistics
      DType minus_mom = (1.0f - param.momentum);
      for (int i = 0; i < channels_; i++) {
        moving_mean_ptr[i] = moving_mean_ptr[i] * param.momentum +
            out_mean_ptr[i] * minus_mom;
        float variance = INVSTD_TO_VARIANCE(out_var_ptr[i], param.eps);
        tmp_var_ptr[i] = variance;
        moving_var_ptr[i] = moving_var_ptr[i] * param.momentum +
            variance * minus_mom;
      }

      std::shared_ptr<const mkldnn::memory> out_mean_mem(
          new mkldnn::memory(bwd_pd.mean_primitive_desc(), out_mean_ptr));
      std::shared_ptr<const mkldnn::memory> out_var_mem(
          new mkldnn::memory(bwd_pd.variance_primitive_desc(), out_var_ptr));

      auto bn_bwd = mkldnn::batch_normalization_backward(bwd_pd,
                                                         *data_mem,
                                                         mkldnn::primitive::at(*out_mean_mem),
                                                         mkldnn::primitive::at(var_mem),
                                                         *diff_mem,
                                                         *weight_mem,
                                                         *gradi_mem,
                                                         *gradw_mem);

      MKLDNNStream::Get()->RegisterPrim(bn_bwd);
      MKLDNNStream::Get()->Submit();
    } else {
      // inference (or global stats): use the moving statistics directly
      std::shared_ptr<const mkldnn::memory> imean_mem(
          new mkldnn::memory(bwd_pd.mean_primitive_desc(),
                             moving_mean.data().dptr<DType>()));
      std::shared_ptr<const mkldnn::memory> ivar_mem(
          new mkldnn::memory(bwd_pd.variance_primitive_desc(),
                             moving_var.data().dptr<DType>()));
      auto bn_bwd = mkldnn::batch_normalization_backward(bwd_pd,
                                                         *data_mem,
                                                         mkldnn::primitive::at(*imean_mem),
                                                         mkldnn::primitive::at(*ivar_mem),
                                                         *diff_mem,
                                                         *weight_mem,
                                                         *gradi_mem,
                                                         *gradw_mem);

      MKLDNNStream::Get()->RegisterPrim(bn_bwd);
      MKLDNNStream::Get()->Submit();
    }

    // copy data from gradw_mem to in_grad[1] and in_grad[2]
    DType* gw_buf = reinterpret_cast<DType *>(gradw_mem->get_data_handle());
    for (int i = 0; i < channels_; i++) {
      if (!param.fix_gamma)
        (in_grad[1].data().dptr<DType>())[i] = gw_buf[i];
      else
        // gamma is frozen, so its gradient is defined as zero
        (in_grad[1].data().dptr<DType>())[i] = 0.0f;
    }

    for (int i = 0; i < channels_; i++) {
      (in_grad[2].data().dptr<DType>())[i] = gw_buf[i + channels_];
    }
  } else {
    LOG(FATAL) << "MKLDNN batch normalization backward: should not reach here ...";
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
|
interpolation_pl.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-linear prolongation of one block: interpolates the coarse grid
// read[] (level_c, id_c) onto a 2x-refined fine grid write[] (level_f, id_f),
// scaling the pre-existing fine values by prescale_f.  Each fine point is a
// weighted sum of its nearest coarse cell and the 7 coarse neighbors toward
// which it is offset; the weights 0.421875/0.140625/0.046875/0.015625 are
// 27/64, 9/64, 3/64, 1/64.
static inline void interpolation_pl_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
  int write_dim_i   = block->dim.i<<1; // calculate the dimensions of the resultant fine block
  int write_dim_j   = block->dim.j<<1;
  int write_dim_k   = block->dim.k<<1;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  double * __restrict__  read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;

  // if read/write refer to boxes (rather than MPI buffers), fetch the box
  // pointers and strides, offset past the ghost zone
  if(block->read.box >=0){
     read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
     read_jStride = level_c->my_boxes[block->read.box ].jStride;
     read_kStride = level_c->my_boxes[block->read.box ].kStride;
  }
  if(block->write.box>=0){
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
  }

  int i,j,k;
  // delta_* select which coarse neighbor each fine point leans toward:
  // even fine indices look backwards, odd look forward
  for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
  for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
  for(i=0;i<write_dim_i;i++){int delta_i=           -1;if(i&0x1)delta_i=           1; // i.e. even points look backwards while odd points look forward
    int write_ijk = ((i   )+write_i) + (((j   )+write_j)*write_jStride) + (((k   )+write_k)*write_kStride);
    int  read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    // | o   |   o |
    // +---+---+---+---+
    // |   | x | x |   |
    //
    // CAREFUL !!!  you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
    // piecewise linear interpolation... NOTE, BC's must have been previously applied
    write[write_ijk] = prescale_f*write[write_ijk] +
                         0.421875*read[read_ijk                        ] +
                         0.140625*read[read_ijk                +delta_k] +
                         0.140625*read[read_ijk        +delta_j        ] +
                         0.046875*read[read_ijk        +delta_j+delta_k] +
                         0.140625*read[read_ijk+delta_i                ] +
                         0.046875*read[read_ijk+delta_i        +delta_k] +
                         0.046875*read[read_ijk+delta_i+delta_j        ] +
                         0.015625*read[read_ijk+delta_i+delta_j+delta_k];
  }}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Prolongates vector id_c on the coarse level onto vector id_f on the fine
// level (fine := prescale_f*fine + P(coarse)).  Structure: prepost Irecv's,
// pack and Isend coarse-side MPI buffers, overlap local interpolation with
// communication, then wait and unpack.  All phases are timed into
// level_f->cycles.
void interpolation_pl(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  // coarse-grid ghost zones and boundary conditions must be current before
  // interpolating
  exchange_boundary(level_c,id_c,0);
  apply_BCs_linear(level_c,id_c,0);

  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int n;
  int my_tag = (level_f->tag<<4) | 0x7;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_f->interpolation.num_recvs;n++){
    MPI_Irecv(level_f->interpolation.recv_buffers[n],
              level_f->interpolation.recv_sizes[n],
              MPI_DOUBLE,
              level_f->interpolation.recv_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
    // !!! prescale==0 because you don't want to increment the MPI buffer
    interpolation_pl_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_c->interpolation.num_sends;n++){
    MPI_Isend(level_c->interpolation.send_buffers[n],
              level_c->interpolation.send_sizes[n],
              MPI_DOUBLE,
              level_c->interpolation.send_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &send_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
  #endif

  // perform local interpolation... try and hide within Isend latency...
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
    interpolation_pl_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  // (received data is accumulated into the fine grid with prescale_f applied)
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
  for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
    IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
stream.c | // Copyright 2009-2021 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2021, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
// Simple STREAM-like triad kernel used to drive the ariel/cramsim simulation:
// allocates three arrays, computes c = 2a + 1.5b in parallel, and reports
// the sum of c.  Returns 0 on success, EXIT_FAILURE if allocation fails.
int main(int argc, char* argv[]) {
    const int LENGTH = 2000;

    printf("\n\n\nHello CramSim!!!!\n");
    printf("Run a stream application with ariel and cramsim\n");
    printf("------------------------------------------------------\n");
    printf("Allocating arrays of size %d elements.\n", LENGTH);

    double* a = (double*) malloc(sizeof(double) * LENGTH);
    double* b = (double*) malloc(sizeof(double) * LENGTH);
    double* c = (double*) malloc(sizeof(double) * LENGTH);
    // bail out cleanly instead of dereferencing NULL on allocation failure
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Allocation failed.\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    printf("Done allocating arrays.\n");

    int i;
    for(i = 0; i < LENGTH; ++i) {
        a[i] = i;
        b[i] = LENGTH - i;
        c[i] = 0;
    }

    printf("Performing the fast_c compute loop...\n");
    #pragma omp parallel for
    for(i = 0; i < LENGTH; ++i) {
        //printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
        c[i] = 2.0 * a[i] + 1.5 * b[i];
    }

    double sum = 0;
    for(i = 0; i < LENGTH; ++i) {
        sum += c[i];
    }

    printf("Sum of arrays is: %f\n", sum);
    printf("Freeing arrays...\n");

    free(a);
    free(b);
    free(c);

    printf("Done.\n");
    return 0;
}
|
convolution_sgemm_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __riscv_vector
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
#endif
// Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const __fp16* bias = _bias;
// permute
Mat tmp;
#if __riscv_vector
if (size >= packn)
tmp.create(packn * maxk, inch, size / packn + size % packn, 2u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
{
int nn_size = size / packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * packn;
__fp16* tmpptr = tmp.channel(i / packn);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
vse16_v_f16m1(tmpptr, vle16_v_f16m1(img0, vl), vl);
img0 += size;
tmpptr += packn;
}
}
}
int remain_size_start = nn_size * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / packn + i % packn);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#else // __riscv_vector
tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#endif // __riscv_vector
#if __riscv_vector
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
__fp16* outptr0 = top_blob.channel(p);
__fp16* outptr1 = top_blob.channel(p + 1);
__fp16* outptr2 = top_blob.channel(p + 2);
__fp16* outptr3 = top_blob.channel(p + 3);
__fp16* outptr4 = top_blob.channel(p + 4);
__fp16* outptr5 = top_blob.channel(p + 5);
__fp16* outptr6 = top_blob.channel(p + 6);
__fp16* outptr7 = top_blob.channel(p + 7);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const __fp16* tmpptr = tmp.channel(i / packn);
const __fp16* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(biasptr[0], vl);
vfloat16m1_t _sum1 = vfmv_v_f_f16m1(biasptr[1], vl);
vfloat16m1_t _sum2 = vfmv_v_f_f16m1(biasptr[2], vl);
vfloat16m1_t _sum3 = vfmv_v_f_f16m1(biasptr[3], vl);
vfloat16m1_t _sum4 = vfmv_v_f_f16m1(biasptr[4], vl);
vfloat16m1_t _sum5 = vfmv_v_f_f16m1(biasptr[5], vl);
vfloat16m1_t _sum6 = vfmv_v_f_f16m1(biasptr[6], vl);
vfloat16m1_t _sum7 = vfmv_v_f_f16m1(biasptr[7], vl);
for (int q = 0; q < nn; q++)
{
vfloat16m1_t _val = vle16_v_f16m1(tmpptr, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], _val, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, kptr[1], _val, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, kptr[2], _val, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, kptr[3], _val, vl);
_sum4 = vfmacc_vf_f16m1(_sum4, kptr[4], _val, vl);
_sum5 = vfmacc_vf_f16m1(_sum5, kptr[5], _val, vl);
_sum6 = vfmacc_vf_f16m1(_sum6, kptr[6], _val, vl);
_sum7 = vfmacc_vf_f16m1(_sum7, kptr[7], _val, vl);
tmpptr += packn;
kptr += 8;
}
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr1, _sum1, vl);
vse16_v_f16m1(outptr2, _sum2, vl);
vse16_v_f16m1(outptr3, _sum3, vl);
vse16_v_f16m1(outptr4, _sum4, vl);
vse16_v_f16m1(outptr5, _sum5, vl);
vse16_v_f16m1(outptr6, _sum6, vl);
vse16_v_f16m1(outptr7, _sum7, vl);
outptr0 += packn;
outptr1 += packn;
outptr2 += packn;
outptr3 += packn;
outptr4 += packn;
outptr5 += packn;
outptr6 += packn;
outptr7 += packn;
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
const __fp16* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
__fp16 sum0 = biasptr[0];
__fp16 sum1 = biasptr[1];
__fp16 sum2 = biasptr[2];
__fp16 sum3 = biasptr[3];
__fp16 sum4 = biasptr[4];
__fp16 sum5 = biasptr[5];
__fp16 sum6 = biasptr[6];
__fp16 sum7 = biasptr[7];
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
sum4 += tmpptr[0] * kptr[4];
sum5 += tmpptr[0] * kptr[5];
sum6 += tmpptr[0] * kptr[6];
sum7 += tmpptr[0] * kptr[7];
tmpptr++;
kptr += 8;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr4[0] = sum4;
outptr5[0] = sum5;
outptr6[0] = sum6;
outptr7[0] = sum7;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
__fp16* outptr0 = top_blob.channel(p);
__fp16* outptr1 = top_blob.channel(p + 1);
__fp16* outptr2 = top_blob.channel(p + 2);
__fp16* outptr3 = top_blob.channel(p + 3);
const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const __fp16* tmpptr = tmp.channel(i / packn);
const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch * maxk; // inch always > 0
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(biasptr[0], vl);
vfloat16m1_t _sum1 = vfmv_v_f_f16m1(biasptr[1], vl);
vfloat16m1_t _sum2 = vfmv_v_f_f16m1(biasptr[2], vl);
vfloat16m1_t _sum3 = vfmv_v_f_f16m1(biasptr[3], vl);
for (int q = 0; q < nn; q++)
{
vfloat16m1_t _val = vle16_v_f16m1(tmpptr, vl);
_sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], _val, vl);
_sum1 = vfmacc_vf_f16m1(_sum1, kptr[1], _val, vl);
_sum2 = vfmacc_vf_f16m1(_sum2, kptr[2], _val, vl);
_sum3 = vfmacc_vf_f16m1(_sum3, kptr[3], _val, vl);
tmpptr += packn;
kptr += 4;
}
vse16_v_f16m1(outptr0, _sum0, vl);
vse16_v_f16m1(outptr1, _sum1, vl);
vse16_v_f16m1(outptr2, _sum2, vl);
vse16_v_f16m1(outptr3, _sum3, vl);
outptr0 += packn;
outptr1 += packn;
outptr2 += packn;
outptr3 += packn;
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch * maxk; // inch always > 0
__fp16 sum0 = biasptr[0];
__fp16 sum1 = biasptr[1];
__fp16 sum2 = biasptr[2];
__fp16 sum3 = biasptr[3];
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + (packn - 1) < size; i += packn)
{
const __fp16* tmpptr = tmp.channel(i / packn);
const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
vfloat16m1_t _sum0 = vfmv_v_f_f16m1(bias0, vl);
for (int q = 0; q < nn; q++)
{
_sum0 = vfmacc_vf_f16m1(_sum0, kptr[0], vle16_v_f16m1(tmpptr, vl), vl);
tmpptr += packn;
kptr++;
}
vse16_v_f16m1(outptr0, _sum0, vl);
outptr0 += packn;
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / packn + i % packn);
const __fp16* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__fp16 sum0 = bias0;
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#else // __riscv_vector
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 bias0 = bias ? bias[p] : 0.f;
for (int i = 0; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i);
const __fp16* kptr = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
__fp16 sum0 = bias0;
for (int q = 0; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
#endif // __riscv_vector
}
static void convolution_im2col_sgemm_transform_kernel_fp16sa_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
// Repack convolution weights (fp32 source, fp16 destination) into the
// interleaved layout consumed by the fp16 sgemm: output channels are grouped
// 8-at-a-time, then 4-at-a-time, then singly; within a group the weights for
// the same (input-channel, kernel-offset) slot are stored contiguously so
// the GEMM inner loop can stream one group per load.
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8b-maxk-inch-outch/8b
Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __riscv_vector
// One destination channel per 8-group, per 4-group, and per leftover single
// output channel. Each channel is sized for the widest (8-lane) case, so the
// 4-lane and 1-lane groups only partially fill their channel.
kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);
int q = 0;
// Groups of 8 output channels: dst order inside the channel is [inch][maxk][8].
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
const Mat k4 = kernel.channel(q + 4);
const Mat k5 = kernel.channel(q + 5);
const Mat k6 = kernel.channel(q + 6);
const Mat k7 = kernel.channel(q + 7);
__fp16* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p < inch; p++)
{
const float* k00 = k0.row(p);
const float* k10 = k1.row(p);
const float* k20 = k2.row(p);
const float* k30 = k3.row(p);
const float* k40 = k4.row(p);
const float* k50 = k5.row(p);
const float* k60 = k6.row(p);
const float* k70 = k7.row(p);
for (int k = 0; k < maxk; k++)
{
// narrow each fp32 weight to fp16 while interleaving the 8 lanes
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
// Remaining groups of 4 output channels: dst order [inch][maxk][4].
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
// channel index skips past the 8-groups, then counts 4-groups
__fp16* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
for (int p = 0; p < inch; p++)
{
const float* k00 = k0.row(p);
const float* k10 = k1.row(p);
const float* k20 = k2.row(p);
const float* k30 = k3.row(p);
for (int k = 0; k < maxk; k++)
{
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00 += 4;
}
}
}
// Leftover single output channels, stored unpacked.
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
__fp16* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
for (int p = 0; p < inch; p++)
{
const float* k00 = k0.row(p);
for (int k = 0; k < maxk; k++)
{
g00[0] = (__fp16)k00[k];
g00 += 1;
}
}
}
#else
// no vector unit: keep the plain maxk-inch-outch layout
kernel_tm = kernel;
#endif // __riscv_vector
}
static void convolution_im2col_sgemm_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
{
const int gap = w * stride_h - outw * stride_w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
__fp16* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += stride_w;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
GB_unaryop__abs_uint8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_int32
// op(A') function: GB_tran__abs_uint8_int32
// C type: uint8_t
// A type: int32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint8_int32
(
uint8_t *restrict Cx,       // output array: Cx [p] = (uint8_t) Ax [p]
const int32_t *restrict Ax, // input array of anz entries
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this specialized operator was disabled at compile time; the caller
// falls back to the generic (non-specialized) kernel instead
return (GrB_NO_VALUE) ;
#else
// apply GB_CAST_OP to every entry: aij = Ax[p], x = (uint8_t) aij,
// Cx[p] = x (GB_OP is the identity here, per the macros above)
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint8_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// the transpose itself is shared template code: the included file applies
// GB_CAST_OP to each entry of A while scattering A' into C
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_async.c |
/**
*
* @file runtime_async.c
*
* @copyright 2012-2017 The University of Tennessee and The University of
* Tennessee Research Foundation. All rights reserved.
* @copyright 2012-2017 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
* Univ. Bordeaux. All rights reserved.
* @copyright 2018 King Abdullah University of Science and Technology (KAUST).
* All rights reserved.
*
* @brief AL4san OpenMP sequence source codes
*
* AL4SAN is a software package provided by King Abdullah University of Science and Technology (KAUST)
*
*
* @author Reazul Hoque
* @author Mathieu Faverge
* @date 2017-01-12
* @version 1.1.0
* @author Rabab Alomairy
* @date 2018-10-18
*/
#include <stdlib.h>
#include "al4san_openmp.h"
/*******************************************************************************
* Wait for the completion of a sequence
**/
int AL4SAN_Openmp_sequence_wait( AL4SAN_context_t *al4san,
AL4SAN_sequence_t *sequence )
{
// Both parameters are unused: the OpenMP backend cannot wait on one
// specific sequence, so waiting on any sequence waits for all child
// tasks generated so far.
(void)al4san;
(void)sequence;
#pragma omp taskwait
return AL4SAN_SUCCESS;
}
/*******************************************************************************
* Terminate a sequence
**/
void AL4SAN_Openmp_sequence_flush( AL4SAN_context_t *al4san,
AL4SAN_sequence_t *sequence,
AL4SAN_request_t *request,
int status)
{
(void)al4san;
// Record the terminating request on the sequence and propagate the given
// status code to both the sequence and the request itself.
sequence->request = request;
sequence->status = status;
request->status = status;
// Drain all tasks submitted so far before returning; the OpenMP backend
// has no way to cancel them individually.
#pragma omp taskwait
// #pragma omp flush
return;
}
|
ccl_fftlog.c | #include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <fftw3.h>
#include <gsl/gsl_sf_result.h>
#include <gsl/gsl_sf_gamma.h>
#include "ccl.h"
/****************************************************************
This is the famous FFTLog.
First implemented by the living legend Andrew Hamilton:
http://casa.colorado.edu/~ajsh/FFTLog/
This version is a C version that was adapted from the C++ version found
in Copter, by JWG Carlson — another big loss for the cosmology community.
https://github.com/jwgcarlson/Copter
I've transformed this from C++ to C99 as the lowest common denominator
and provided bindings for C++ and python.
These are the C++ bindings
*****************************************************************/
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef M_LN2
#define M_LN2 0.69314718056
#endif
/* This code is FFTLog, which is described in arXiv:astro-ph/9905191 */
static double complex lngamma_fftlog(double complex z)
{
// Complex log-gamma via GSL: returns ln|Gamma(z)| + i*arg(Gamma(z)).
// NOTE(review): the GSL error status is ignored here — presumably callers
// only pass arguments in Gamma's domain; confirm before reusing elsewhere.
gsl_sf_result lnr, phi;
gsl_sf_lngamma_complex_e(creal(z), cimag(z), &lnr, &phi);
return lnr.val + I*phi.val;
}
static double complex polar (double r, double phi)
{
return (r*cos(phi) +I*(r*sin(phi)));
}
static void lngamma_4(double x, double y, double* lnr, double* arg)
{
double complex w = lngamma_fftlog(x+y*I);
if(lnr) *lnr = creal(w);
if(arg) *arg = cimag(w);
}
static double goodkr(int N, double mu, double q, double L, double kr)
{
// Nudge kr to the nearest "low-ringing" value: choose kr so that the
// periodicity phase of the FFTLog kernel lands exactly on an integer,
// which suppresses ringing in the transform (see astro-ph/9905191).
double xp = (mu+1+q)/2;
double xm = (mu+1-q)/2;
double y = M_PI*N/(2*L);
double lnr, argm, argp;
// Only the gamma-function phases are used; lnr is a scratch output that
// is overwritten by the second call and never read.
lngamma_4(xp, y, &lnr, &argp);
lngamma_4(xm, y, &lnr, &argm);
double arg = log(2/kr) * N/L + (argp + argm)/M_PI;
double iarg = round(arg);
// rescale kr so that arg becomes exactly iarg
if(arg != iarg)
kr *= exp((arg - iarg)*L/N);
return kr;
}
/* Pre-compute the coefficients that appear in the FFTLog implementation of
* the discrete Hankel transform. The parameters N, mu, and q here are the
* same as for the function fht(). The parameter L is defined (for whatever
* reason) to be N times the logarithmic spacing of the input array, i.e.
* L = N * log(r[N-1]/r[0])/(N-1) */
static void compute_u_coefficients(int N, double mu, double q, double L, double kcrc, double complex *u)
{
double y = M_PI/L;
double k0r0 = kcrc * exp(-L);
// per-mode phase increment coming from the choice of k0*r0
double t = -2*y*log(k0r0/2);
// Unbiased transform (q == 0): each u_m has unit modulus, only a phase.
if(q == 0) {
double x = (mu+1)/2;
double lnr, phi;
for(int m = 0; m <= N/2; m++) {
lngamma_4(x, m*y, &lnr, &phi);
u[m] = polar(1.0,m*t + 2*phi);
}
}
else {
// Biased transform: the ratio Gamma(xp+imy)/Gamma(xm-imy) contributes
// both a modulus (via lnrp - lnrm, plus the 2^q factor) and a phase.
double xp = (mu+1+q)/2;
double xm = (mu+1-q)/2;
double lnrp, phip, lnrm, phim;
for(int m = 0; m <= N/2; m++) {
lngamma_4(xp, m*y, &lnrp, &phip);
lngamma_4(xm,-m*y, &lnrm, &phim);
u[m] = polar(exp(q*M_LN2 + lnrp - lnrm), m*t + phip - phim);
}
}
// Negative frequencies are the complex conjugates of the positive ones,
// as required for a real-valued inverse transform.
for(int m = N/2+1; m < N; m++)
u[m] = conj(u[N-m]);
// For even N the Nyquist coefficient must be purely real.
if((N % 2) == 0)
u[N/2] = (creal(u[N/2]) + I*0.0);
}
/* Compute the discrete Hankel transform of the function a(r). See the FFTLog
* documentation (or the Fortran routine of the same name in the FFTLog
* sources) for a description of exactly what this function computes.
* If u is NULL, the transform coefficients will be computed anew and discarded
* afterwards. If you plan on performing many consecutive transforms, it is
* more efficient to pre-compute the u coefficients. */
/* Core FFTLog engine (astro-ph/9905191): compute the discrete Hankel
 * transform of npk input arrays pk[j] (each of length N on the logarithmic
 * grid k[]), writing results to xi[j] and filling the output grid r[].
 *
 * dim    : dimensionality prefactor (2 for flat-sky, 3 for 3-D transforms)
 * mu     : order of the Bessel kernel
 * q      : bias exponent of the power-law weighting
 * kcrc   : k_c * r_c product (adjusted when noring is set, to reduce ringing)
 * u      : optional pre-computed coefficients; pass NULL to compute (and
 *          free) them internally
 * status : in/out CCL error flag; nonzero on entry skips all work
 */
static void fht(int npk, int N,
double *k, double **pk,
double *r, double **xi,
double dim, double mu, double q, double kcrc,
int noring, double complex* u, int *status)
{
/* Initialize every resource to NULL so the unified cleanup at the end is
 * safe on all early-failure paths. (Previously a_tmp/b_tmp were freed while
 * uninitialized when an earlier allocation failed, which is undefined
 * behavior, and the FFTW plans leaked when a worker thread set *status.) */
fftw_plan forward_plan = NULL;
fftw_plan reverse_plan = NULL;
fftw_complex* a_tmp = NULL;
fftw_complex* b_tmp = NULL;
double complex* ulocal = NULL;
/* L = N times the logarithmic spacing of the input k grid */
double L = log(k[N-1]/k[0]) * N/(N-1.);
if(u == NULL) {
if(noring)
kcrc = goodkr(N, mu, q, L, kcrc);
ulocal = malloc(sizeof(complex double)*N);
if(ulocal==NULL)
*status=CCL_ERROR_MEMORY;
if(*status == 0) {
compute_u_coefficients(N, mu, q, L, kcrc, ulocal);
u = ulocal;
}
}
/* Scratch arrays only used to create the plans; fftw_execute_dft below
 * runs on per-thread arrays of the same size/alignment. */
if(*status == 0) {
a_tmp = fftw_alloc_complex(N);
if(a_tmp==NULL)
*status=CCL_ERROR_MEMORY;
}
if(*status == 0) {
b_tmp = fftw_alloc_complex(N);
if(b_tmp==NULL)
*status=CCL_ERROR_MEMORY;
}
if(*status == 0) {
/* Compute the convolution b = a*u using FFTs */
forward_plan = fftw_plan_dft_1d(N,
(fftw_complex*) a_tmp,
(fftw_complex*) b_tmp,
-1, FFTW_ESTIMATE);
reverse_plan = fftw_plan_dft_1d(N,
(fftw_complex*) b_tmp,
(fftw_complex*) b_tmp,
+1, FFTW_ESTIMATE);
}
if(*status == 0) {
#pragma omp parallel default(none) \
shared(npk, N, k, pk, r, xi, \
dim, mu, q, kcrc, u, status, \
forward_plan, reverse_plan, \
L, ulocal)
{
int local_status = 0;
/* per-thread workspace: power-law prefactors and FFT buffers */
double *prefac_pk=NULL;
if(local_status == 0) {
prefac_pk = malloc(N*sizeof(double));
if(prefac_pk==NULL)
local_status=CCL_ERROR_MEMORY;
}
double *prefac_xi=NULL;
if(local_status == 0) {
prefac_xi = malloc(N*sizeof(double));
if(prefac_xi==NULL)
local_status=CCL_ERROR_MEMORY;
}
fftw_complex* a=NULL;
fftw_complex* b=NULL;
if(local_status == 0) {
a = fftw_alloc_complex(N);
if(a==NULL)
local_status=CCL_ERROR_MEMORY;
}
if(local_status == 0) {
b = fftw_alloc_complex(N);
if(b==NULL)
local_status=CCL_ERROR_MEMORY;
}
if(local_status == 0) {
for(int i = 0; i < N; i++)
prefac_pk[i] = pow(k[i], dim/2-q);
/* Compute r's corresponding to input k's */
double k0r0 = kcrc * exp(-L);
r[0] = k0r0/k[0];
for(int n = 1; n < N; n++)
r[n] = r[0] * exp(n*L/N);
double one_over_2pi_dhalf = pow(2*M_PI,-dim/2);
for(int i = 0; i < N; i++)
prefac_xi[i] = one_over_2pi_dhalf * pow(r[i], -dim/2-q);
#pragma omp for
for(int j = 0; j < npk; j++) {
for(int i = 0; i < N; i++)
a[i] = prefac_pk[i] * pk[j][i];
fftw_execute_dft(forward_plan,a,b);
for(int m = 0; m < N; m++)
b[m] *= u[m] / (double)(N); /* divide by N since FFTW doesn't normalize the inverse FFT */
fftw_execute_dft(reverse_plan,b,b);
/* Reverse b array */
double complex tmp;
for(int n = 0; n < N/2; n++) {
tmp = b[n];
b[n] = b[N-n-1];
b[N-n-1] = tmp;
}
for(int i = 0; i < N; i++)
xi[j][i] = prefac_xi[i] * creal(b[i]);
}
}
free(prefac_pk);
free(prefac_xi);
fftw_free(a);
fftw_free(b);
if (local_status) {
#pragma omp atomic write
*status = local_status;
}
} /* end omp parallel */
}
/* Tear everything down unconditionally. Each call below is a no-op on a
 * NULL argument (fftw_free(NULL) and free(NULL) are no-ops, plans are
 * destroyed only if created), so this is correct both on success and on
 * every error path — including a failure reported by a worker thread. */
if(forward_plan != NULL)
fftw_destroy_plan(forward_plan);
if(reverse_plan != NULL)
fftw_destroy_plan(reverse_plan);
free(ulocal);
fftw_free(a_tmp);
fftw_free(b_tmp);
}
// 2D (flat-sky) FFTLog transform: dim = 2, Bessel order mu, bias q = epsilon,
// with the low-ringing kcrc adjustment enabled (noring = 1, kcrc = 1).
void ccl_fftlog_ComputeXi2D(double mu, double epsilon,
int npk, int N, double *l,double **cl,
double *th, double **xi, int *status)
{
fht(npk, N, l, cl, th, xi, 2., mu, epsilon, 1, 1, NULL, status);
}
// 3D FFTLog transform: dim = 3, Bessel order l + 1/2 (spherical Bessel j_l),
// bias q = epsilon, with the low-ringing adjustment enabled.
void ccl_fftlog_ComputeXi3D(double l, double epsilon,
int npk, int N, double *k, double **pk,
double *r, double **xi, int *status)
{
fht(npk, N, k, pk, r, xi, 3., l+0.5, epsilon, 1, 1, NULL, status);
}
|
conv_kernel_rv64.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "conv_kernel_rv64.h"
// #include "wino_conv_kernel_arm.h" // FIXME: add wino support
#ifdef __aarch64__
// #include "wino_conv_kernel_1_arm.h" // FIXME: add wino support
#endif
#define PER_OUT_CHAN 16
void sgemm_4x16_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
void sgemm_4x4_a72(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
void im2col_fp32_1x1(float* input, int input_xy, float* col, int col_cnt, int input_chan);
void im2col_fp32_3x3(float* input, int w, int h, int channel, float* cur_col, int stride);
/* Interleave `kernel_chan` kernels of `kernel_size` floats each into the
 * lane-major layout the GEMM kernels expect: full groups of PER_OUT_CHAN
 * kernels first, then full groups of 4, then a final group of 1..3 kernels
 * zero-padded out to 4 lanes. */
static void interleave_kernel(float* kernel, float* kernel_interleaved, int kernel_chan, int kernel_size)
{
    float* dst = kernel_interleaved;
    int i = 0;

    /* full PER_OUT_CHAN-wide groups: element j of every kernel in the group
     * is stored consecutively */
    for (; i + PER_OUT_CHAN - 1 < kernel_chan; i += PER_OUT_CHAN)
    {
        for (int j = 0; j < kernel_size; j++)
        {
            for (int k = 0; k < PER_OUT_CHAN; k++)
                *dst++ = kernel[kernel_size * (i + k) + j];
        }
    }

    /* remaining full groups of 4 */
    for (; i < (kernel_chan & -4); i += 4)
    {
        for (int j = 0; j < kernel_size; j++)
        {
            for (int k = 0; k < 4; k++)
                *dst++ = kernel[kernel_size * (i + k) + j];
        }
    }

    /* tail of 1..3 kernels: real lanes first, zero-filled up to 4 */
    const int rem = kernel_chan & 0x3;
    if (rem)
    {
        for (int j = 0; j < kernel_size; j++)
        {
            for (int k = 0; k < 4; k++)
                *dst++ = (k < rem) ? kernel[kernel_size * (i + k) + j] : 0.f;
        }
    }
}
/* kernel interleave */
/* kernel interleave: repack each group's weights into the 4-channel-aligned
 * interleaved buffer used by the GEMM kernels. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
    const int group = param->group;
    const int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    const int out_chan = filter->dims[0] / group;
    const int out_chan_align4 = (out_chan + 3) / 4 * 4;   /* rounded up to a multiple of 4 */
    const int dst_stride = kernel_size * out_chan_align4; /* per-group stride in the interleaved buffer */
    const int src_stride = kernel_size * out_chan;        /* per-group stride in the raw weights */

    float* kernel = filter->data;
    float* interleave_buf = priv_info->interleave_buffer;

    for (int g = 0; g < group; g++)
    {
        interleave_kernel(kernel + g * src_stride,
                          interleave_buf + g * dst_stride,
                          out_chan, kernel_size);
    }
}
// Unroll the convolution input into column-matrix form (im2col), producing
// 4 output pixels per tile so the GEMM kernels can consume 4-wide columns.
// Three paths: specialized 1x1/stride-1, specialized 3x3 (dilation 1), and
// a fully generic fallback. Out-of-image samples are written as zero.
static void im2col(float* input, float* col, int in_c, int in_w, int in_h, int k_w, int k_h, int s_w, int s_h, int d_w,
int d_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, int out_w, int out_h, int num_thread)
{
// ---- path 1: 1x1 kernel, stride 1 -- a pure gather, no padding logic ----
if (k_w == 1 && k_h == 1 && s_w == 1 && s_h == 1)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int out_xy = out_w * out_h;
int col_end3 = out_xy & 3;
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
{
float* cur_col = col + col_i * kernel_size;
float* cur_input = input + col_i;
im2col_fp32_1x1(cur_input, in_xy, cur_col, 4, in_c); // FIXME: add im2col 1x1
}
int col_i = out_xy & -4;
float* cur_col;
// final 4 input
// tail tile: copy the 1..3 valid pixels and zero-fill the rest
if (col_end3)
{
cur_col = col + col_i * kernel_size;
for (int col_j = 0; col_j < kernel_size; col_j++)
{
for (int i = 0; i < 4; i++)
{
if (i < col_end3)
*cur_col++ = *(input + col_j * in_xy + col_i + i);
else
*cur_col++ = 0;
}
}
}
}
// ---- path 2: 3x3 kernel, no dilation, square stride ----
else if (d_w == 1 && d_h == 1 && k_w == 3 && k_h == 3 && s_w == s_h)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int out_xy = out_w * out_h;
int col_end3 = out_xy & 3;
int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < (out_xy & -4); col_i += 4)
{
float* cur_col = col + col_i * kernel_size;
int imy0 = col_i / out_w;
int imy3 = (col_i + 3) / out_w;
int imx0 = col_i - imy0 * out_w;
int imx3 = (col_i + 3) - imy3 * out_w;
// Fast path: all 4 pixels on the same output row and the 3x3 window
// never touches padding, so the optimized copy can be used.
if ((imy0 == imy3) && (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (out_h - 1) && imx3 != (out_w - 1))))
{
float* l0 = input + (imy0 * s_h - pad_h0) * in_w + (imx0 * s_w - pad_w0);
{
// im2col_fp32_3x3(l0, in_w, in_h, in_c, cur_col, s_w); // add im2col 3x3
cur_col += 4 * kernel_size;
}
}
else
{
// Slow path: per-pixel bounds checks with zero padding.
int cnt_y[4] = {imy0, (col_i + 1) / out_w, (col_i + 2) / out_w, imy3};
int cnt_x[4] = {imx0, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, imx3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
for (int ky = 0; ky < 3; ky++)
for (int kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
// final 4 input
// tail tile: same bounds-checked copy, zero-filling lanes past out_xy
int col_i = out_xy & -4;
if (col_end3)
{
float* cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
{
for (int ky = 0; ky < 3; ky++)
{
for (int kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
}
// ---- path 3: generic kernel/stride/dilation ----
else
{
int out_xy = out_w * out_h;
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int col_end3 = out_xy & 3;
float* cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
// dilation handled by stepping ky/kx in units of d_h/d_w
for (int ky = 0; ky < (k_h * d_h); ky += d_h)
for (int kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
// tail tile of the generic path
int col_i = out_xy & -4;
float* cur_col;
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int col_end3 = out_xy & 3;
if (col_end3)
{
cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
for (int ky = 0; ky < (k_h * d_h); ky += d_h)
for (int kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
// GEMM over full PER_OUT_CHAN-wide output-channel groups: col (im2col matrix)
// times the interleaved kernel, 4 output pixels per inner call.
// NOTE(review): the assembly micro-kernels below are commented out (RV64 port
// pending), so in its current state this writes `result` without it ever
// being produced — placeholder code, do not ship as-is.
static void sgemm_set(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start,
int ch_end, int output_xy, int activation, int num_thread, int cpu_affinity)
{
int nn_outch = ch_end / PER_OUT_CHAN;
int col_end3 = output_xy & 0x3;
// output_xy not a multiple of 4: last tile goes through a scratch buffer
if (col_end3)
{
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? ( float* )(biases + p) : NULL;
float* kernel_tmp = ( float* )(kernel + p * kernel_size);
float* output_tmp = ( float* )(output + p * output_xy);
int col_line = 0;
// full 4-pixel tiles, written straight into the output
for (col_line = 0; col_line + 3 < output_xy; col_line += 4)
{
float* col_tmp = ( float* )(col + col_line * kernel_size);
// sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0); // FIXME: replace with sgemm_4x16_rv64
}
// tail tile: compute into `result`, then copy only the valid columns
{
float result[64];
float* col_tmp = ( float* )(col + col_line * kernel_size);
// sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0); // FIXME: replace with sgemm_4x16_rv64
for (int i = 0; i < 16; i++)
{
// result is laid out 16 rows x 4 columns; keep col_end3 columns
for (int j = 0; j < (col_end3); j++)
*(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
}
else
{
// output_xy divisible by 4: every tile writes directly to the output
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? ( float* )(biases + p) : NULL;
float* kernel_tmp = ( float* )(kernel + p * kernel_size);
float* output_tmp = ( float* )(output + p * output_xy);
for (int col_line = 0; col_line + 3 < output_xy; col_line += 4)
{
float* col_tmp = ( float* )(col + col_line * kernel_size);
// sgemm_4x16_a72(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0); // FIXME: replace with sgemm_4x16_rv64
}
}
}
}
// GEMM over the remaining output channels in 4-wide groups, with bounds-
// checked tails in both the channel and pixel dimensions.
// NOTE(review): like sgemm_set, the assembly micro-kernels are commented out
// (RV64 port pending), so `result` is currently consumed without being
// produced — placeholder code.
static void sgemm4x4(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start, int ch_end,
int output_xy, int activation, int num_thread, int cpu_affinity)
{
float result[16];
int col_end3 = output_xy & 0x3;
int kernel_end3 = ch_end & 0x3;
// full groups of 4 output channels
#pragma omp parallel for num_threads(num_thread) private(result)
for (int kernel_num = ch_start; kernel_num < ((ch_end & -4)-3); kernel_num += 4)
{
float* cur_biases = NULL;
float *cur_col, *cur_kernel, *cur_output;
int col_line;
if (biases)
cur_biases = ( float* )(biases + kernel_num);
cur_kernel = ( float* )(kernel + kernel_num * kernel_size);
cur_output = ( float* )(output + kernel_num * output_xy);
// full 4-pixel tiles written straight into the output
for (col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
cur_col = ( float* )(col + col_line * kernel_size);
// sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0); // FIXME: replace with sgemm_4x16_rv64
}
// pixel tail: compute into result, copy only the valid columns
if (col_end3)
{
cur_col = ( float* )(col + col_line * kernel_size);
// sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0); // FIXME: replace with sgemm_4x4_rv64
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
// channel tail: 1..3 leftover output channels
if (kernel_end3)
{
int kernel_num = (ch_end & -4);
float* cur_biases = NULL;
if (biases)
cur_biases = ( float* )(biases + kernel_num);
float* cur_kernel = ( float* )(kernel + kernel_num * kernel_size);
#pragma omp parallel for num_threads(num_thread) private(result)
for (int col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
float* cur_col = ( float* )(col + col_line * kernel_size);
// sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0); // FIXME: replace with sgemm_4x4_rv64
// keep only the kernel_end3 valid channel rows of the 4x4 tile
for (int i = 0; i < kernel_end3; i++)
for (int j = 0; j < 4; j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
int col_line = output_xy & -4;
// corner tile: both channel and pixel tails at once
if (col_end3)
{
float* cur_col = ( float* )(col + col_line * kernel_size);
// sgemm_4x4_a72(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0); // FIXME: replace with sgemm_4x4_rv64
for (int i = 0; i < (kernel_end3); i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
}
/* check whether the convolution can use the winograd implementation */
/* Decide whether the winograd implementation applies to this convolution.
 *
 * Winograd is only used for 3x3, stride-1, dilation-1, non-grouped convolutions
 * on sufficiently large inputs. Returns 1 when supported, 0 otherwise.
 */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    const int is_3x3_single_group = (param->group == 1) && (param->kernel_h == 3) && (param->kernel_w == 3);
    const int is_unit_stride = (param->stride_h == 1) && (param->stride_w == 1);
    const int is_unit_dilation = (param->dilation_h == 1) && (param->dilation_w == 1);

    /* input too small for the winograd tiles */
    if (in_h < 7 && in_w < 7)
        return 0;
    /* small input with few output channels: not worth the transform overhead */
    if (in_h < 10 && in_w < 10 && param->output_channel < 16)
        return 0;
    if (!is_3x3_single_group)
        return 0;
    if (!is_unit_stride || !is_unit_dilation)
        return 0;
    return 1;
}
/*
* get the memory size for im2col of input tensor
*/
/* Return the byte size of the shared im2col buffer for this convolution:
 * one column of kernel_size elements per output pixel, with the pixel count
 * rounded up to a multiple of 4, plus 128 bytes of slack.
 */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int channels_per_group = param->input_channel / param->group;
    /* elements in one im2col column */
    int kernel_size = channels_per_group * param->kernel_h * param->kernel_w;
    /* output pixel count, rounded up to a multiple of 4 for the 4-wide kernels */
    int out_cstep = ((out_h * out_w + 3) / 4) * 4;
    int elem_size = input->elem_size; // uint8/int8 is 1 byte, fp32 is 4 bytes
    return elem_size * kernel_size * out_cstep + 128;
}
/*
* get the memory size for im2col + sgemm of kernel tensor interleave
*/
/* Return the byte size of the interleaved-kernel buffer: per-group output
 * channels rounded up to a multiple of 4, across all groups, plus 128 bytes
 * of slack.
 */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int group = param->group;
    /* per-group output channels, rounded up to a multiple of 4 */
    int out_chan_align4 = ((filter->dims[0] / group) + 3) / 4 * 4;
    /* elements in one kernel: in_c * kh * kw */
    int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
    return kernel_size * filter->elem_size * out_chan_align4 * group + 128; // caution
}
/* Adopt an externally owned im2col buffer. The external flag prevents
 * conv_hcl_postrun from freeing it. Always returns 0.
 */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/* NOTE(review): unlike conv_hcl_set_shared_mem above, this deliberately ignores
 * `mem`/`mem_size` and clears the pack4 buffer state — the pack4 im2col path
 * appears unused in this backend (conv_hcl_get_shared_pack4_mem_size returns 0).
 * Confirm this is intentional before relying on it. Always returns 0. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 0;
    priv_info->im2col_buffer_pack4 = NULL;
    priv_info->im2col_buffer_pack4_size = 0;
    return 0;
}
/* The pack4 im2col path is not used by this implementation, so no shared
 * pack4 memory is required; always returns 0. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    return 0;
}
/* Prepare the convolution: decide on the winograd path, allocate the im2col and
 * kernel-interleave buffers (unless supplied externally), and interleave the
 * kernel data. Returns 0 on success.
 */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    (void)in_c; /* only consulted on aarch64 below */

    /* check winograd implement, only for conv3x3s1 */
    priv_info->winograd = winograd_support(param, in_h, in_w);
    if (priv_info->winograd)
    {
        /* FIX: braces added around both branches. With the winograd calls commented
         * out, the original bare `if (...) <comment> else` left the `if` without a
         * statement and did not compile. Until the winograd kernels are ported
         * (see FIXMEs), fall through to the generic im2col path below. */
#ifdef __aarch64__
        if (in_c >= 256)
        {
            // return wino_conv_hcl_prerun_1(input_tensor, filter_tensor, output_tensor, priv_info, param); // FIXME: add wino support
        }
        else
#endif
        {
            // return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); // FIXME: add wino support
        }
    }

    /* alloc mem of im2col */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* alloc mem of kernel interleave */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* kernel interleave */
    interleave(filter_tensor, priv_info, param);

    return 0;
}
/* Release the buffers allocated in conv_hcl_prerun. Externally supplied
 * buffers are left untouched. Always returns 0.
 */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        // wino_conv_hcl_postrun(priv_info); // FIXME: add wino support
    }

    if (priv_info->interleave_buffer != NULL && !priv_info->external_interleave_mem)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (priv_info->im2col_buffer != NULL && !priv_info->external_im2col_mem)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    return 0;
}
/* Run the convolution: for each batch item and group, im2col-transform the
 * input, then multiply it against the interleaved kernel with the wide
 * sgemm_set kernel plus a sgemm4x4 pass for the leftover output channels.
 * Returns 0 on success.
 */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    /* param */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;
    int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];

    if (priv_info->winograd)
    {
        /* FIX: braces added around both branches. With the winograd calls commented
         * out, the original bare `if (...) <comment> else` left the `if` without a
         * statement and did not compile. Until the winograd kernels are ported
         * (see FIXMEs), fall through to the generic im2col + sgemm path below. */
#ifdef __aarch64__
        if (in_c >= 256)
        {
            // return wino_conv_hcl_run_1(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); // FIXME: add wino support
        }
        else
#endif
        {
            // return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); // FIXME: add wino support
        }
    }

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4); /* channel stride used by the interleaved kernel buffer */
    int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = ( float* )bias_tensor->data;
    float* col_buf = ( float* )priv_info->im2col_buffer;
    float* interleave_buf = ( float* )priv_info->interleave_buffer;

    int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN; /* channels handled by the wide kernel */
    int sgemm_set_remain = out_c % PER_OUT_CHAN;              /* leftover channels -> sgemm4x4 */

    for (int n = 0; n < batch; n++) // batch size
    {
        for (int g = 0; g < group; g++)
        {
            /* im2col */
            float* cur_input = input_buf + n * input_image_size + g * input_size;
            im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h,
                   pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);

            /* gemm */
            float* cur_kernel = interleave_buf + g * kernel_size * out_c_align;
            float* cur_output = output_buf + n * output_image_size + g * output_size;
            float* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;
            sgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, 0, sgemm_set_chan, out_hw, act_type,
                      num_thread, cpu_affinity);
            if (sgemm_set_remain)
                sgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, sgemm_set_chan, out_c, out_hw,
                         act_type, num_thread, cpu_affinity);
        }
    }

    return 0;
}
|
nvptx_device_math_complex.c | // REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s
// expected-no-diagnostics
// CHECK-DAG: call { float, float } @__divsc3(
// CHECK-DAG: call { float, float } @__mulsc3(
// Single-precision complex multiply and divide inside a target region must
// lower to the __mulsc3/__divsc3 runtime helpers (see CHECK-DAG lines above).
void test_scmplx(float _Complex a) {
#pragma omp target
  {
    (void)(a * (a / a));
  }
}
// CHECK-DAG: call { double, double } @__divdc3(
// CHECK-DAG: call { double, double } @__muldc3(
// Double-precision complex multiply and divide inside a target region must
// lower to the __muldc3/__divdc3 runtime helpers (see CHECK-DAG lines above).
void test_dcmplx(double _Complex a) {
#pragma omp target
  {
    (void)(a * (a / a));
  }
}
|
arraytools.h | /** \file arraytools.h
\brief Contains the array_link class and related classes.
This file contains method and classes to work with (orthogonal) arrays.
Author: Pieter Eendebak <pieter.eendebak@gmail.com>
Copyright: See LICENSE.txt file that comes with this distribution
*/
#pragma once
#ifdef WIN32
#define _CRT_SECURE_NO_DEPRECATE
#pragma warning(disable : 4996)
#pragma warning(disable : 4018)
#pragma warning(disable : 4244)
#endif
#ifdef WIN32
#ifdef FULLPACKAGE
#include "msstdint.h"
#endif
#else
#ifdef _WIN32 // || __CYGWIN__
// No visual studio!
#ifdef FULLPACKAGE
#ifndef int32_t
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
#endif
#ifndef uint64_t
typedef(unsigned __int64) uint64_t;
#endif
#endif
#else
// assume zlib is present on unix
#ifdef NOZLIB
#else
#ifdef FULLPACKAGE
#ifndef USEZLIB
#define USEZLIB 1
#endif
#endif
#endif
#endif
#endif
#ifdef FULLPACKAGE
#include <iostream>
#endif
#include <assert.h>
#include <deque>
#include <fstream>
#include <iomanip>
#include <ostream>
#include <sstream>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <map>
#include <stdexcept>
#include <Eigen/Core>
#include "printfheader.h"
void throw_runtime_exception (const std::string exception_message); // forward declaration to throw_runtime_exception in tools.cpp
// float types used for Eigen calculations
typedef Eigen::MatrixXd MatrixFloat;
typedef Eigen::ArrayXd ArrayFloat;
typedef Eigen::VectorXd VectorFloat;
typedef double eigenFloat;
/** Print information about an Eigen matrix
*
* \param m Matrix about which to print information
* \param str String to prepend in output
* \param verbose Verbosity level
*/
void eigenInfo (const MatrixFloat m, const char *str = "eigen", int verbose = 1);
/** Print Eigen matrix to stdout */
void print_eigen_matrix(const MatrixFloat matrix);
// helper function for Python interface
void eigen2numpyHelper (double *pymat1, int n, const MatrixFloat &m);
#ifdef USEZLIB
#include <zlib.h>
#endif
#include "mathtools.h"
#include "oaoptions.h"
#ifdef FULLPACKAGE
#include "bitarray/bit_array.h"
#include "md5.h"
#endif
extern "C" {}
/// data type for elements of orthogonal arrays
typedef short int array_t;
/// constant version of array_t
typedef const short int carray_t;
/* change definition below together with array_t !!!! */
#define MPI_ARRAY_T MPI_SHORT
/*other options for MPI_ARRAY_T are: char: MPI_CHAR, short: MPI_SHORT, int: MPI_INT, long: MPI_LONG */
typedef short int rowindex_t; /** type used for row indexing */
typedef int colindex_t; /** type used for column indexing */
typedef const int const_colindex_t; /** constant version of type used for column indexing */
/// pointer to array
typedef array_t *array_p;
/// pointer to constant array
typedef carray_t *carray_p;
typedef rowindex_t *rowperm_t; /** type of row permutation */
typedef colindex_t *colperm_t; /** type of column permutation */
typedef array_t *levelperm_t; /** type of level permutation */
// used to calculate the value (index) of values in a column combination
// this index is used in the strength calculations
// maximum value if of order max(s)*t
typedef int vindex_t; /* value index type */
/// return size in bytes of array_t type
int sizeof_array_t ();
/// return size in bytes of double type
int sizeof_double ();
/// possible values for J-values of 2-level design
/** Possible values for J-values of a 2-level design
 *
 * For a design with N runs and the given strength t, the J-values can only take
 * the values N - i * 2^(t+1) for i = 0, 1, ...; this returns that descending list.
 *
 * \param N Number of runs
 * \param strength Strength t of the design
 * \return Vector of possible F values, from N down in steps of 2^(t+1)
 */
inline std::vector< int > possible_F_values (int N, int strength) {
    // integer arithmetic instead of pow()/floor(): 2^(strength+1) and truncating division
    const int x = 1 << (strength + 1);
    const int nn = N / x + 1;
    std::vector< int > Fv (nn);
    for (int i = 0; i < nn; i++) {
        Fv[i] = N - x * i;
    }
    return Fv;
}
/// return true if the specified file exists
bool file_exists (const std::string filename);
/// return true if the specified file exists
bool file_exists (const char *filename);
/// return true if the specified oa file exists
bool oa_file_exists (const char *filename);
/// return true if the specified oa file exists
bool oa_file_exists (const std::string filename);
/// Ordering schemes used to compare and sort arrays
enum ordering_t {
    /// lexicographically minimal by columns ordering
    ORDER_LEX,
    /// J5 based ordering
    ORDER_J5
};
struct array_link;
/** @brief Specifies a class of arrays
 *
 * The specification includes the number of rows, number of columns, factor levels and strength.
 */
struct arraydata_t {
    /// number of runs
    rowindex_t N;
    /// total number of columns (factors) in the design
    colindex_t ncols;
    /// strength of the design
    colindex_t strength;
    /// pointer to factor levels of the array
    array_t *s;
    /// Ordering used for arrays
    ordering_t order;

    /* derived data */
    /// number of groups of columns with the same number of levels
    colindex_t ncolgroups;
    /// specifies for each column the index of the column group
    colindex_t *colgroupindex;
    /// specifies for each column the size of the column group
    colindex_t *colgroupsize;
    /// index of the array
    int oaindex;

  public:
    /** Specifies a class of orthogonal arrays
     *
     * The specification includes the number of rows, number of columns, factor levels and strength.
     *
     * An orthogonal array of strength t, N runs, k factors (columns) and factor levels s[i] is an N times k array with
     * symbols 0, 1, ..., s[i]-1 in column i such that for every t columns every t-tuple of elements occurs equally often.
     */
    arraydata_t();
    /**
     * @copydoc arraydata_t::arraydata_t()
     *
     * \param s Factor levels
     * \param N Number of rows
     * \param strength Strength for class
     * \param ncols Number of columns for the class
     */
    arraydata_t (array_t s, rowindex_t N, colindex_t strength, colindex_t ncols);
    /**
     * @copydoc arraydata_t::arraydata_t()
     *
     * \param s Factor levels
     * \param N Number of rows
     * \param strength Strength for class
     * \param ncols Number of columns for the class
     */
    arraydata_t (const std::vector< int > s, rowindex_t N, colindex_t strength, colindex_t ncols);
    /// @copydoc arraydata_t::arraydata_t()
    arraydata_t (const array_t *s_, rowindex_t N, colindex_t strength, colindex_t ncols);
    /// @copydoc arraydata_t::arraydata_t()
    arraydata_t (const arraydata_t &adp);
    /// @copydoc arraydata_t::arraydata_t()
    ///
    /// Copy an existing class, restricted to the first newncols columns.
    arraydata_t (const arraydata_t *adp, colindex_t newncols);
    ~arraydata_t ();

    arraydata_t& operator= (const arraydata_t &ad2);
    int operator== (const arraydata_t &ad2);

    /// return true if the class represents mixed-level arrays
    bool ismixed () const;
    /// return true if the class represents a 2-level array
    bool is2level () const;
    /// return random array from the class. this operation is only valid for strength 0 or 1
    array_link randomarray (int strength = 0, int ncols = -1) const;
    /** @brief Write file with specification of orthogonal array class
     *
     * @param filename Filename to write to
     */
    void writeConfigFile (const char *filename) const;
    /// return string with class representation
    std::string idstr () const;
    /// return string with class representation. series of levels is expanded
    std::string idstrseriesfull () const;
    /// return string with class representation
    std::string fullidstr (int series = 0) const;
    /// return latex string describing the class
    std::string latexstr (int cmd = 0, int series = 0) const;

  public:
    /// return a copy of the class restricted to the first k columns
    arraydata_t reduceColumns (int k) {
        arraydata_t adata (this, k);
        return adata;
    }
    /// Return string used for displaying the class
    std::string showstr () const;
    void show (int verbose = 1) const;
    /// Calculate derived data such as the index and column groups from a design
    void complete_arraydata ();
    /// check whether the LMC calculation will overflow
    void lmc_overflow_check () const;
    // complete arraydata but split the column groups at the last column
    void complete_arraydata_fixlast ();
    // complete arraydata but split the column groups at ns
    void complete_arraydata_splitn (int ns);
    // set column groups at positions given by argument vector
    void set_colgroups (const std::vector< int > splits);
    /// set column group equal to that of a symmetry group
    void set_colgroups (const symmetry_group &sg);
    /// return sizes of the column groups
    std::vector<int> get_column_groups_sizes() const;
    /// show column groups in the array class
    void show_colgroups () const;
    /// calculate the index of the orthogonal arrays in this class
    void calculate_oa_index (colindex_t strength);
    /// return the root array for the class
    array_link create_root (int n_columns = -1, int fill_value = 0) const;
    /// return the factor level for the specified column; return -1 if the column index is invalid
    int getfactorlevel(int idx) const;

    /// return factor levels
    std::vector< int > getS () const {
        // deprecated: kept for backwards compatibility, use factor_levels() instead
        myprintf("getS(): deprecated method: use factor_levels instead\n");
        std::vector< int > s (this->ncols);
        for (int i = 0; i < this->ncols; i++) {
            s[i] = this->s[i];
        }
        return s;
    }
    /// return factor levels
    std::vector< int > factor_levels () const;
    /// return factor levels for the column groups
    std::vector< int > factor_levels_column_groups() const;

    /**
     * @brief Reset strength of arraydata
     * @param strength The strength to reset the structure to
     */
    void reset_strength(colindex_t strength);
    /// Return index of the column group for a column
    colindex_t get_col_group(const colindex_t col) const;

  public:
    /// Return True if the factor levels are sorted from large to small
    bool is_factor_levels_sorted() const;
};
/// Read array configuration from file
arraydata_t *readConfigFile (const char *file);
/**
* @brief Function similar to printf returning C++ style string
* @param message
* @return
*/
std::string printfstring(const char *message, ...);
/**
 * @brief Make a copy of an array
 *
 * Copies nrows * ncols elements from src into dst; the buffers must not overlap.
 */
inline void copy_array (const array_t *src, array_t *const dst, const int nrows, const int ncols) {
    const size_t nbytes = sizeof (array_t) * nrows * ncols;
    memcpy (dst, src, nbytes);
}
/**
 * @brief Delete an array
 *
 * Releases memory allocated with create_array/clone_array. Passing NULL is a no-op.
 * @param array Pointer to the array memory (ownership is taken)
 * @return Always 0
 */
inline int destroy_array (array_t *array) {
    free (array);
    return 0;
}
/**
 * @brief Create an array
 *
 * Throws a runtime exception (via throw_runtime_exception) when allocation fails.
 * @param nrows Number of rows
 * @param ncols Number of columns
 * @return Pointer to uninitialized array memory; release with destroy_array
 */
static inline array_t *create_array (const int nrows, const int ncols) {
    // compute the element count in size_t to avoid int overflow for large arrays
    const size_t nelem = static_cast< size_t > (nrows) * static_cast< size_t > (ncols);
    array_t *array = (array_t *)malloc (nelem * sizeof (array_t));
    if (array == NULL) {
        throw_runtime_exception(printfstring("create_array: problem with malloc of size %dx%d", nrows, ncols));
    }
    return array;
}
/**
 * @brief Create an array from an arraydata_t structure
 * @param ad Array class specification supplying the number of rows (N) and columns
 */
inline array_t *create_array (const arraydata_t *ad) { return create_array (ad->N, ad->ncols); }
/**
 * @brief Clone an array
 *
 * Allocates a new array of the same size and copies the contents.
 * The caller owns the returned memory (release with destroy_array).
 */
inline array_t *clone_array (const array_t *const array, const rowindex_t nrows, const colindex_t ncols) {
    array_t *duplicate = create_array (nrows, ncols);
    copy_array (array, duplicate, nrows, ncols);
    return duplicate;
}
/** \brief Class representing an array
 */
struct array_link {
    /// Number of rows in array
    rowindex_t n_rows;
    /// Number of columns in array
    colindex_t n_columns;
    /// Index number
    int index;
    /// Pointer to array data
    array_t *array;

    /// sentinel values for the index field
    static const int INDEX_NONE = 0;
    static const int INDEX_ERROR = -1;
    static const int INDEX_DEFAULT = 0;

    /** A class representing an integer valued array
     *
     */
    array_link ();
    /** @copydoc array_link::array_link()
     *
     * The array is initialized with zeros.
     *
     * \param nrows Number of rows
     * \param ncols Number of columns
     * \param index Number to keep track of lists of designs
     */
    array_link (rowindex_t nrows, colindex_t ncols, int index);
    /** @copydoc array_link::array_link()
     *
     * Initialize with data from a pointer.
     */
    array_link (rowindex_t nrows, colindex_t ncols, int index, carray_t *data);
    /** @copydoc array_link::array_link()
     *
     * Initialize with data from another array_link object.
     */
    array_link (const array_link &);
    /** @copydoc array_link::array_link()
     *
     * Initialize with data from an Eigen matrix.
     */
    array_link (Eigen::MatrixXd &eigen_matrix);
    /** @copydoc array_link::array_link()
     *
     * The array is initialized by permuting the columns of another array
     *
     * \param array Source to copy from
     * \param column_permutation The permutation to apply
     */
    array_link(const array_link &array, const std::vector< int > &column_permutation);
    /// @copydoc array_link::array_link()
    array_link(const array_t *array, rowindex_t nrows, colindex_t ncols, int index = 0);
    /// @copydoc array_link::array_link()
    array_link(const array_t *array, rowindex_t nrows, colindex_t ncolsorig, colindex_t ncols, int index);
    /** @copydoc array_link::array_link()
     *
     * The array is initialized by copying the values from a vector.
     */
    array_link(const std::vector< int > &values, rowindex_t nrows, colindex_t ncols, int index = 0);
    ~array_link ();
#ifdef SWIGCODE
    /// Create array_link from a raw memory buffer
    array_link (long *pymatinput, int nrows, int ncols);
#endif

    /// return a deep copy of this array
    array_link clone () const;

  public:
    /// print an array to output stream
    friend std::ostream &operator<< (std::ostream &, const array_link &A);
    /// print array to stdout
    void showarray () const;
    /// print array to string
    std::string showarrayString () const;
    /// print array to stdout in compact format (no whitespace between elements)
    void showarraycompact () const;
    /// print array properties to stdout
    void showproperties () const;
    /// return true if the array is a 2-level array (e.g. only contains values 0 and 1)
    bool is2level () const;
    /// return true if the array is a mixed-level array
    bool is_mixed_level() const;
    /// return true if the array is an array with values in 0, 1, ..., for each column
    bool is_orthogonal_array() const;
    /** return true if the array is a +1, 0, -1 valued array
     */
    bool is_conference () const;
    /// return true if the array is a +1, 0, -1 valued array, with specified number of zeros in each column
    bool is_conference (int number_of_zeros) const;
    /// return true if the array is symmetric
    bool isSymmetric () const;
    /// make the array symmetric by copying the upper-right to the lower-left
    void makeSymmetric ();
    /// return array with selected column removed
    array_link deleteColumn (int index) const;
    /// return array with first number_of_arrays rows
    array_link selectFirstRows (int nrows) const;
    /// return array with first number_of_arrays columns selected
    array_link selectFirstColumns (int ncolumns) const;
    /// return array with last number_of_arrays columns selected
    array_link selectLastColumns (int ncolumns) const;
    /// select columns from an array
    array_link selectColumns (const std::vector< int > c) const;
    /// select single column from an array
    array_link selectColumns (int c) const;
    /// set a column of the array to the given vector
    void setColumn (int c, const std::vector< int > v) {
        // columns are stored contiguously (column-major layout)
        std::copy (v.begin (), v.end (), this->array + c * this->n_rows);
    }
    /// set a column of the array to the given vector
    void setColumn (int c, const std::vector< signed char > v) {
        std::copy (v.begin (), v.end (), this->array + c * this->n_rows);
    }
    /// return transposed array
    array_link transposed () const;
    /// calculate D-efficiency
    double Defficiency () const;
    /// calculate main effect robustness (or Ds-optimality)
    double DsEfficiency (int verbose = 0) const;
    /// calculate D-efficiency, calculate main effect robustness (or Ds-optimality) and D1-efficiency for an orthogonal array
    std::vector< double > Defficiencies (int verbose = 0, int addDs0 = 0) const;
    /** Calculate average variance inflation factor
     *
     * If the VIF is infinite, the value 0 is returned. The VIF takes values between 1 and infinity.
     */
    double VIFefficiency () const;
    /// calculate A-efficiency
    double Aefficiency () const;
    /// calculate E-efficiency
    double Eefficiency () const;
    /** Calculate F-values of a 2-level matrix.
     *
     * This assumes the strength is at least 3. Otherwise use the jstruct_t object
     */
    std::vector< int > Fvalues (int number_of_columns) const;
    /** Calculate F-values of a conference design
     *
     * \param number_of_columns Number of columns to use
     * \return The Fk vector with k the number of columns specified
     *
     **/
    std::vector< int > FvaluesConference (int number_of_columns) const;
    /** Calculate the Jk-characteristics of the matrix (the values are signed)
     *
     * \param jj Number of columns to use
     * \returns Vector with calculated Jk values
     */
    std::vector< int > Jcharacteristics (int jj = 4) const;
    /// Calculate the projective estimation capacity sequence
    std::vector< double > PECsequence (int verbose = 0) const;
    /// Calculate the projective information capacity sequence
    std::vector< double > PICsequence(int verbose = 0) const;
    /// calculate rank of array
    int rank () const;
    /** Calculate generalized wordlength pattern
     *
     * @see ::GWLP
     */
    std::vector< double > GWLP (int truncate = 1, int verbose = 0) const;
    /// calculate strength of an array
    int strength () const;
    /// return true if the array is a foldover array
    bool foldover () const;
    // return value of minimum element in array
    array_t min () const;
    // return value of maximum element in array
    array_t max () const;
    /** Calculate centered L2 discrepancy
     *
     * The method is from "A connection between uniformity and aberration in regular fractions of two-level factorials", Fang and Mukerjee, 2000
     */
    double CL2discrepancy () const;
    /// apply a random permutation of rows, columns and levels of an orthogonal array
    array_link randomperm () const;
    /// apply a random permutation of columns of an orthogonal array
    array_link randomcolperm () const;
    /// apply a random permutation of rows of an orthogonal array
    array_link randomrowperm () const;
    /** Calculate model matrix of an orthogonal array
     *
     * \param order For 0 return only the intercept; for 1 return intercept and main effects; for 2 return intercept, main effects and interaction effects.
     * \param intercept If 1, then include the intercept in the output.
     * \param verbose Verbosity level
     * \return Calculated model matrix
     *
     * This function uses @ref array2eigenModelMatrixMixed for the calculation.
     */
    MatrixFloat getModelMatrix (int order, int intercept = 1, int verbose = 0) const;
    array_link &operator= (const array_link &rhs);
    array_link &deepcopy (const array_link &rhs);
    array_link &shallowcopy (const array_link &rhs);
    /** @brief Return True if both arrays are equal
     *
     * \param rhs Array to compare to
     * \returns 1 if arrays are equal. 0 otherwise. Returns 0 if arrays have different sizes
     */
    int operator== (const array_link &rhs) const;
    int operator!= (const array_link &rhs) const;
    int operator< (const array_link &rhs) const;
    int operator> (const array_link &rhs) const;
    /// return true if two arrays have the same dimensions
    int equalsize(const array_link &rhs) const;
    /// elementwise addition
    array_link operator+ (const array_link &) const;
    /// elementwise addition
    array_link operator+ (array_t value) const;
    array_link operator- (const array_link &) const;
    array_link operator- (array_t value) const;
    /// elementwise multiplication
    array_link operator* (const array_link &rhs) const;
    array_link operator* (array_t value) const;
    array_link operator*= (array_t value);
    array_link operator+= (array_t value);
    array_link operator-= (array_t value);
    /// get element from array, no error checking, inline version
    inline const array_t &atfast (const rowindex_t r, const colindex_t c) const {
        // column-major storage: element (r, c) lives at offset r + n_rows * c
        return this->array[r + this->n_rows * c];
    }
    /// get element from array, no error checking, inline version
    inline array_t &atfast (const rowindex_t r, const colindex_t c) { return this->array[r + this->n_rows * c]; }
    /// get element at specified position, no bounds checking
    array_t _at (const rowindex_t, const colindex_t) const;
    /// get element at specified position, no bounds checking
    array_t _at (const int index) const;
    /// get element at specified position
    array_t at (const rowindex_t, const colindex_t) const;
    /// get element at specified position
    array_t at (const int index) const;
    /// get element at specified position
    array_t &at (const rowindex_t, const colindex_t);
    /// set all elements in the array to a value
    void setconstant (array_t value);
    /// set value of an array
    void setvalue (int row, int col, int value);
    /// set value of an array
    void setvalue (int row, int col, double value);
    /// set value of an array, no bounds checking!
    void _setvalue (int row, int col, int value);
    /// multiply a row by -1
    void negateRow (rowindex_t row);
    /// print information about array
    void show () const;
    /// return string describing the array
    std::string showstr () const;
    /// return md5 sum of array representation (as represented with 32bit int datatype in memory)
    std::string md5 () const;
    /// return true if two columns are equal
    bool columnEqual (int column_index, const array_link &rhs, int column_index_rhs) const;
    /// return index of first different column
    int firstColumnDifference (const array_link &A) const;
    /** Calculate row and column index of first difference between two arrays
     *
     * The difference is according to the column-major ordering.
     */
    bool firstDiff(const array_link &A, int &r, int &c, int verbose = 1) const;
    /// create root in arraylink
    void create_root (const arraydata_t &arrayclass, int fill_value = 0);
    /// return fraction of nonzero elements in array
    double nonzero_fraction () const;
    /// fill array with zeros
    void clear();

    // getarraydata (Python interface). this needs to be of type int32 (default python int type)
    void getarraydata (int *pymat1, int n) { std::copy (this->array, this->array + n, pymat1); }
    /// internal function
    template < class numtype > void setarraydata (const numtype *tmp, int n) {
        if (n != this->n_rows * this->n_columns)
            myprintf ("array_link:setarraydata: warning: number of elements incorrect: n %d, %d %d\n", n,
                      this->n_rows, this->n_columns);
        std::copy (tmp, tmp + n, this->array);
    }
    /// internal function: fill the column-major array from a row-major (transposed) buffer
    template < class numtype > void setarraydata_transposed (const numtype *input_data, int n) {
        if (n != this->n_rows * this->n_columns)
            myprintf ("array_link:setarraydata: warning: number of elements incorrect: n %d, %d %d\n", n,
                      this->n_rows, this->n_columns);
        int i = 0;
        for (int row = 0; row < this->n_rows; row++) {
            for (int col = 0; col < this->n_columns; col++) {
                this->array[row + col * this->n_rows] = input_data[i];
                i++;
            }
        }
    }
    /// special method for SWIG interface
    void setarraydata (std::vector< int > tmp, int n) { std::copy (tmp.begin (), tmp.begin () + n, this->array); }
    /// internal function
    template < class numtype > void setarraydata (std::vector< numtype > tmp, int n) {
        std::copy (tmp.begin (), tmp.begin () + n, this->array);
    }
    /// set column to values
    void setcolumn (int target_column, const array_link &source_array, int source_column = 0) const;

  public:
    void init (rowindex_t r, colindex_t c); // made public for python interface

    /// return the row_symmetry group of an array
    symmetry_group row_symmetry_group () const;
    /// return the LMC form of the array
    array_link reduceLMC () const;
    /// return the delete-one-factor-projection form of the array
    array_link reduceDOP () const;
    /// return the array as an Eigen matrix
    MatrixFloat getEigenMatrix() const;
    /// return true if specified column is smaller than column in another array
    /// (NOTE(review): the method name suggests "greater" — confirm against the implementation)
    int columnGreater (int c1, const array_link &rhs, int rhs_column) const;
    void debug () const;
#ifdef SWIGCODE
    void *data (); /// return pointer to data, needed for swig interface
#endif

  private:
    /// return true if both arrays have the same size
    bool equal_size(const array_link &array) const;
    bool _valid_index (const rowindex_t r, const colindex_t c) const;
    bool _valid_index (int index) const;
};
#ifdef SWIGCODE
/// Create array_link from numpy array
array_link create_array_link(long* pymatinput, int number_of_rows, int number_of_columns);
/// Update the data of an array_link with the specified data
void update_array_link(array_link &al, long* pymatinput, int number_of_rows, int number_of_columns);
#endif
/** Return -1 if the first array is smaller in LMC ordering than the second array, 0 if equal and 1 otherwise **/
int compareLMC(const array_link &lhs, const array_link &rhs);
/** Return example array
*
* \param idx Index of example array to return
* \param verbose If True, then print information about the array to stdout
*/
array_link exampleArray(int idx = 0, int verbose = 0);
/** Calculate Jk-characteristics for a conference design
*
* \param array Conference design
* \param number_of_columns Specifies the number of columns to use
* \param verbose Verbosity level
* \return A vector of calculated inner products between all combinations of k columns.
*/
std::vector< int > Jcharacteristics_conference(const array_link &array, int number_of_columns, int verbose = 0);
/// data type for elements of conference designs
typedef signed char conf_t;
/// data type for column of a conference design
typedef std::vector< conf_t > conference_column;
/// list of columns of conference designs
typedef std::vector< conference_column > conference_column_list;
/// concatenate 2 arrays in horizontal direction
array_link hstack (const array_link &array1, const array_link &array2);
/// concatenate array and conference_column
array_link hstack (const array_link &array, const conference_column &column);
/// concatenate 2 arrays in horizontal direction
array_link hstack (const array_link &array_left, const array_link &array_right);
/// concatenate the last column of array B to array A
array_link hstacklastcol (const array_link &A, const array_link &B);
/// concatenate two columns
conference_column vstack(const conference_column &column_top, const conference_column &column_bottom);
/// perform column permutation for an array
void perform_column_permutation (const array_link source, array_link &target, const std::vector< int > perm);
/// perform row permutation for an array
void perform_row_permutation (const array_link source, array_link &target, const std::vector< int > perm);
/** create arraydata_t structure from array
*
 * \param array Array to use as input specification for array class
* \param extracols Number of extra columns to add to the number of columns of the array
* \param strength Strength to set in the array class. If -1, then use the strength of the array
*/
arraydata_t arraylink2arraydata (const array_link &array, int extracols = 0, int strength = 2);
/// container with arrays
typedef std::deque< array_link > arraylist_t;
/// add a constant value to all arrays in a list
arraylist_t addConstant (const arraylist_t &lst, int value);
/** Return number of arrays with j_{2n+1}=0 for number_of_arrays<m */
std::vector< int > getJcounts (arraylist_t *arraylist, int N, int k, int verbose = 1);
/**
 * @brief struct to hold data of an array, e.g. J-characteristic. Abstract base class
 *
 */
class jstructbase_t {
public:
/// calculated J-characteristics
std::vector< int > values;
/// possible values for Jk-characteristics
std::vector< int > jvalues;
/// map from Jk-value to index in the jvalues variable
std::map< int, int > jvalue2index;
/// number of columns
int jj;
public:
/// calculate maximum J value
int maxJ () const;
/// calculate possible values in F vector
std::vector< int > Jvalues () const { return this->jvalues; }
/** Calculate histogram of J values
 *
 * \return Histogram of J values
 *
 * The histogram bins are given by the values of @ref Jvalues.
 *
 **/
std::vector< int > calculateF () const;
/// Calculate the J-values for a given array
virtual void calc (const array_link &array) = 0;
/// Show contents of structure
void show ();
void showdata (int verbose = 1);
std::string showstr ();
/// return 1 if all calculated J-values are zero, otherwise return 0
int allzero () {
// check the calculated J-values; the previous code iterated over jvalues (the
// list of *possible* Jk values), making the result independent of the array data
for (size_t i = 0; i < this->values.size (); ++i) {
if (this->values[i] != 0) {
return 0;
}
}
return 1;
}
};
/// structure containing data related to symmetries of arrays
struct symmdata {
public:
array_link rowvalue;
array_link orig;
array_link ft;
symmdata (const array_link &al, int minlen = 1);
void show (int verbose = 1) const {
myprintf ("symmdata: rowvalues\n");
this->rowvalue.showarray ();
if (verbose >= 2) {
myprintf ("symmdata: ft:");
this->ft.show ();
this->ft.showarray ();
}
}
/// list with indices set to check for symmetry reductions
std::vector< int > checkIdx (int col = -1) const {
const int N = this->orig.n_rows;
if (col < 0) {
col = orig.n_columns - 1;
}
std::vector< int > idx (N);
// never check first index
for (int row = 1; row < N; row++) {
if (this->rowvalue._at (row, col) == this->rowvalue._at (row - 1, col)) {
idx[row] = 1;
}
}
return idx;
}
};
/**
 * @brief struct to hold data of an array, e.g. J-characteristic, rank
 *
 * See papers: Minimum G2-aberration properties of two-level foldover designs, Butler, 2004
 * Design Selection and Classification for Hadamard Matrices Using Generalized Minimum Aberration Criteria,
 * Deng and Tang
 *
 */
class jstruct_t {
public:
/// number of rows in array
int N;
/// number of columns in array
int k;
/// J-characteristic that is calculated
int jj;
/// number of column combinations possible
int nc;
/// contains calculated J-values
std::vector< int > values;
/// calculated aberration (the member name keeps the historic misspelling for API compatibility)
double abberration;
public:
/// Create an object to calculate J-characteristics
jstruct_t ();
/// Create an object to calculate J-characteristics
jstruct_t (const array_link &al, int jj = 4);
/// @copydoc jstruct_t::jstruct_t()
jstruct_t (const int N, const int K, const int jj = 4);
/// @copydoc jstruct_t::jstruct_t()
jstruct_t (const jstruct_t &js);
~jstruct_t ();
public:
jstruct_t &operator= (const jstruct_t &rhs);
/// calculate maximum J value
int maxJ () const;
/// Calculate the number of possible J values that can occur for the given strength
int number_J_values(int strength) const;
/** Calculate possible values in F vector
 *
 * \param strength Strength to use
 * \return Vector with possible Jk values (ordered from high to low)
 *
 */
std::vector< int > Fval (int strength = 3) const;
/// calculate histogram of J values for a 2-level array
std::vector< int > calculateF (int strength = 3) const;
/** Calculate aberration value
 *
 * This is equal to the sum of the squares of all Jk values, divided by the number of rows squared.
 *
 * The calculated aberration is stored in the member variable abberration.
 **/
void calculateAberration();
/// Show contents of structure
void show () const;
void showdata ();
std::string showstr ();
/// return 1 if all J values are zero, otherwise return 0
int allzero() const;
private:
/// init data structures
void init(int N, int k, int jj);
/// calculate J-characteristics of a 2-level array
void calc(const array_link &al);
/// calculate J-characteristics of a 2-level array, special function for jj=4
void calcj4(const array_link &al);
/// calculate J-characteristics of a 2-level array, special function for jj=5
void calcj5(const array_link &al);
};
/** Calculate J-characteristics of conference designs
 *
 **/
class jstructconference_t : public jstructbase_t {
public:
/** Create structure to calculate J-characteristics of conference designs
 *
 * \param N Number of rows
 * \param jj Number of columns to use for the Jk-characteristics
 **/
jstructconference_t (int N, int jj = 4) {
this->jj = jj;
calcJvalues (N, jj);
}
/** Calculate J-characteristics of a conference design
 *
 * \param array Array to calculate the J-characteristics for
 * \param jj Number of columns to use for the Jk-characteristics
 **/
jstructconference_t (const array_link &array, int jj = 4) {
this->jj = jj;
const int N = array.n_rows;
calcJvalues (N, jj);
calc (array);
}
private:
/// calculate the possible Jk values and fill the jvalue2index map
void calcJvalues(int N, int jj);
/// calculate the J-characteristics for the specified array
void calc(const array_link &al);
};
/// set first columns of an array to root form
void create_root (array_t *array, const arraydata_t *arrayclass);
/// Creates the root of an orthogonal array. The root is appended to the list of arrays
void create_root (const arraydata_t *arrayclass, arraylist_t &solutions);
/// Compare 2 arrays and return position of first difference
int array_diff (carray_p A, carray_p B, const rowindex_t r, const colindex_t c, rowindex_t &rpos, colindex_t &cpos);
/// helper function to calculate J-values
inline void fastJupdate (const array_t *array, rowindex_t N, const int J, const colindex_t *column_indices, array_t *tmp) {
for (int i = 0; i < J; i++) {
carray_t *cp = array + N * column_indices[i];
for (rowindex_t r = 0; r < N; r++) {
tmp[r] += cp[r];
}
}
return;
}
/** Calculate J-value for a 2-level array
*/
int jvalue (const array_link &array, const int J, const int *column_indices);
/** Calculate J-value for a column combination of a 2-level array
*
* We assume the array has values 0 and 1. No boundary checks are performed.
*/
int jvaluefast (const array_t *array, rowindex_t N, const int J, const colindex_t *column_indices);
/// Analyse a list of arrays
std::vector< jstruct_t > analyseArrays (const arraylist_t &arraylist, const int verbose, const int jj = 4);
/** \brief Contains a transformation of an array
 *
 * Contains an array transformation. The transformation consists of column, row and
 * level permutations. The level and column permutations are not commutative (since the level permutations
 * are tied to a particular column). We apply the column permutations first.
 *
 */
class array_transformation_t {
public:
/// row permutation
rowperm_t rperm;
/// column permutation
colperm_t cperm;
/// level permutations (one permutation per column)
levelperm_t *lperms;
/// type of array
const arraydata_t *ad;
public:
array_transformation_t (const arraydata_t *arrayclass);
array_transformation_t (const arraydata_t &arrayclass);
array_transformation_t ();
/// copy constructor
array_transformation_t (const array_transformation_t &transformation);
/// assignment operator
array_transformation_t &operator= (const array_transformation_t &at);
~array_transformation_t ();
/// show the array transformation
void show () const;
/// return true if the transformation is equal to the identity
bool isIdentity () const;
/// return the inverse transformation
array_transformation_t inverse () const;
/// reset the transformation to the identity transformation
void reset ();
/// initialize to a random transformation
void randomize ();
/// initialize with a random column permutation
void randomizecolperm ();
/// initialize with a random row permutation
void randomizerowperm ();
/// apply transformation to an array_link object
array_link apply(const array_link &array) const;
/// Comparison operator
int operator== (const array_transformation_t &t2) const;
/// composition operator. the transformations are applied from the left
array_transformation_t operator* (const array_transformation_t b) const;
/// apply transformation to an array (inplace)
void apply (array_t *sourcetarget) const;
/// apply transformation to an array
void apply (const array_t *source, array_t *target) const;
/// apply transformation and show resulting array
void print_transformed (carray_t *source) const;
/// show the array transformation on the specified stream
void show (std::ostream &out) const;
/// return the row permutation of the transformation
std::vector< int > rowperm () const;
/// return the column permutation of the transformation
std::vector< int > colperm () const;
/// return the level permutations of the transformation
std::vector< int > lvlperm (int c) const;
/// set the row permutation of the transformation
void setrowperm (std::vector< int > row_permutation);
/// set the column permutation of the transformation
void setcolperm (std::vector< int > column_permutation);
/// set the level permutation of the transformation
void setlevelperm (int column_index, std::vector< int > lvl_permutation);
private:
/// initialize permutation structures
void allocate_data_structures ();
/// free permutation structures and arraydata_t structure
void free_data_structures ();
};
/** \brief Contains a transformation of a conference matrix
 *
 * Contains an array transformation. The transformation consists of column permutations, row permutations and sign
 * switches for both the rows and columns.
 *
 * The sign switches and the permutations are not commutative. We apply the permutations first and then the sign flips.
 *
 */
class conference_transformation_t {
public:
/// row permutation of the transformation
std::vector< int > rperm;
/// column permutation of the transformation
std::vector< int > cperm;
/// sign flips for the columns
std::vector< int > cswitch;
/// sign flips for the rows
std::vector< int > rswitch;
/// number of rows
int nrows;
/// number of columns
int ncols;
public:
/// default constructor
conference_transformation_t ();
conference_transformation_t (int nrows, int ncols);
conference_transformation_t (const array_link &al);
conference_transformation_t (const conference_transformation_t &T);
/// show the array transformation
void show (int verbose = 1) const;
/// return true if the transformation is equal to the identity
bool isIdentity () const;
/// return the inverse transformation
conference_transformation_t inverse () const;
/// reset the transformation to the identity transformation
void reset ();
/// initialize to a random transformation
void randomize ();
/// initialize with a random column permutation
void randomizecolperm ();
/// initialize with a random row permutation
void randomizerowperm ();
/// initialize with random col switches
void randomizecolflips ();
/// initialize with random row switches
void randomizerowflips ();
/// apply transformation to an array_link object
array_link apply (const array_link &al) const;
int operator== (const conference_transformation_t &rhs) const;
/** composition operator. the transformations are applied from the left
 *
 * E.g. (T1*T2)(x) = T1(T2(x))
 *
 */
conference_transformation_t operator* (const conference_transformation_t &rhs) const;
/// set the row permutation of the transformation
void setrowperm (std::vector< int > rp) { rperm = rp; };
/// set the column permutation of the transformation
void setcolperm (std::vector< int > cp) { cperm = cp; };
private:
/// initialize permutation structures
void init (int nr, int nc);
};
/* functions for working with array files*/
/// print a list of arrays to stdout
void showArrayList (const arraylist_t &lst);
#ifdef FULLPACKAGE
namespace arrayfile {
/// file format mode
enum arrayfilemode_t {
/// text based format
ATEXT,
/// write arrays to a text file in a format that can be parsed by LaTeX
ALATEX,
/// binary format
ABINARY,
/// binary format storing differences of arrays
ABINARY_DIFF,
/// binary format storing differences of arrays and zero offsets
ABINARY_DIFFZERO,
/// error value, the format could not be determined
AERROR,
/// automatically determine the format
A_AUTOMATIC,
/// automatically determine the format (but binary)
A_AUTOMATIC_BINARY
};
/// file mode for array file
enum afilerw_t { READ, WRITE, READWRITE };
/** @brief Structure for reading or writing a file with arrays
 *
 * The format of the file is determined by the ``arrayfilemode_t``
 * The format is described in detail in the documentation of the OApackage https://oapackage.readthedocs.io/en/latest/.
 *
 */
struct arrayfile_t {
public:
/// location of file on disk
std::string filename;
/// True if the file is compressed with gzip
int iscompressed;
/// number of rows of the arrays
int nrows;
/// number of columns of the arrays
int ncols;
/// number of bits used when storing an array
int nbits;
/// file mode, can be ATEXT or ABINARY, ABINARY_DIFF, ABINARY_DIFFZERO
arrayfilemode_t mode;
/// file opened for reading or writing
afilerw_t rwmode;
// we cannot define SWIG variables as int32_t, we get errors in the Python module
/// number of arrays in the file
int narrays;
/// current position in the file, counted in arrays
int narraycounter;
/// maximum number of arrays in structure
static const int NARRAYS_MAX = 2 * 1000 * 1000 * 1000;
public:
/** Structure for reading or writing a file with arrays
 */
arrayfile_t ();
/** @copydoc arrayfile_t::arrayfile_t()
 *
 * \param filename File to open for reading
 * \param verbose Verbosity level
 */
arrayfile_t (const std::string filename, int verbose = 1);
/** @copydoc arrayfile_t::arrayfile_t()
 *
 * Open new array file for writing
 *
 * \param filename File to open
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \param narrays Specify a number of arrays, or -1 to add dynamically
 * \param mode File mode
 * \param number_of_bits Number of bits to use for storage. For 2-level arrays only 1 bit is needed
 */
arrayfile_t (const std::string filename, int nrows, int ncols, int narrays = -1, arrayfilemode_t mode = ATEXT,
int number_of_bits = 8);
/// destructor function, closes all filehandles
~arrayfile_t ();
/// Open a new file for writing and (if opened) close the current file
void createfile (const std::string filename, int nrows, int ncols, int narrays = -1, arrayfilemode_t m = ATEXT,
int number_of_bits = 8);
/// close the array file
void closefile ();
/// return true if file is open
int isopen () const;
/// seek to specified array position
int seek (int pos);
/// read array and return index
int read_array (array_link &a);
/// read next array from the file
array_link readnext ();
/// read set of arrays from the file
arraylist_t readarrays (int nmax = NARRAYS_MAX, int verbose = 1);
/// flush any open file pointer
void flush ();
/// return true if the file has binary format
bool isbinary () const;
/// append list of arrays to the file
int append_arrays (const arraylist_t &arrays, int startidx = -1);
/// append a single array to the file
void append_array (const array_link &a, int specialindex = -1);
/// Add a comment to an array file (only available in text mode)
void add_comment(const std::string &comment);
/// return True if code is wrapped by SWIG
int swigcheck () const;
/// return string describing the object
std::string showstr () const;
/// return current position in file (counted in arrays)
size_t pos () const { return narraycounter; }
/// return true if the file format has random access mode
bool hasrandomaccess () const { return (this->mode == ABINARY); }
private:
public:
FILE *nfid;
#ifdef USEZLIB
/// pointer to compressed file
gzFile gzfid;
#else
/// placeholder field when zlib support is not compiled in
int gzfid;
#endif
/// verbosity level
int verbose;
private:
/// previously processed array, used by the binary diff formats
array_link diffarray;
/// return header size for binary format array
int headersize () const;
/// return size of bit array
int barraysize () const;
/// wrapper function for fwrite or gzwrite
size_t afwrite (void *ptr, size_t t, size_t n);
/// wrapper function for fread or gzread
size_t afread (void *ptr, size_t sz, size_t cnt);
public:
/// update numbers count for a file structure
void updatenumbers ();
/// read array and return index
int read_array (array_t *array, const int nrows, const int ncols);
void finisharrayfile();
/// set verbosity level
void setVerbose(int v);
private:
int read_array_binary_zero (array_link &a);
void write_array_binary (carray_t *array, const int nrows, const int ncols);
void write_array_binary (const array_link &A);
/** Write an array in binary diff mode to a file
 *
 * We only write the section of columns of the array that differs from the previous array.
 */
void write_array_binary_diff (const array_link &A);
/** Write an array in binary diffzero mode */
void write_array_binary_diffzero (const array_link &A);
public:
int getnbits ();
/// parse string to determine the file mode
static arrayfile::arrayfilemode_t parseModeString (const std::string format);
/// return number of bits necessary to store an array
static int arrayNbits (const arraydata_t &ad) {
int m = 0;
// find the maximum factor level over all columns
for (int i = 0; i < ad.ncols; ++i) {
if (ad.s[i] > m) {
m = ad.s[i];
}
}
if (m == 2) {
return 1; // bit
} else if (m < 120) {
return 8; // char
} else {
return 32; // int32_t
}
}
/// return number of bits necessary to store an array
static int arrayNbits (const array_link &A) {
int m = A.max ();
int amin = A.min ();
// account for negative values (e.g. conference designs)
m = std::max (m, -amin + 1);
if (m == 1) {
return 1; // bit
} else if (m < 124) {
return 8; // char
} else {
return 32; // int32_t
}
};
protected:
void writeheader ();
/// Read a binary array from a file
void read_array_binary (array_t *array, const int nrows, const int ncols);
};
}
using namespace arrayfile;
/// return number of arrays in an array file
long nArrays (const char *fname);
/** return information about file with arrays
*
* \param filename Filename of array file
* \param number_of_arrays Variable is set with number of arrays
* \param number_of_rows Variable is set with number of rows
* \param number_of_columns Variable is set with number of columns
*/
void arrayfileinfo(const char *filename, int &number_of_arrays, int &number_of_rows, int &number_of_columns);
/** Read all arrays in a file
*
* @param fname Filename to read from
* @param verbose Verbosity level
* @param setcols Pointer to return number of columns from array file
* @return List of arrays
*/
arraylist_t readarrayfile (const char *fname, int verbose = 1, int *setcols = 0);
/** Read all arrays in a file and append then to an array list
*
* @param filename Filename to read from
* @param arraylist Pointer to list of arrays
* @param verbose Verbosity level
* @param setcols Reference that is set with the number of columns from the file
* @param setrows Reference that is set with the number of rows from the file
* @param setbits Reference that is set with the number of bits from the file
* @return
*/
int readarrayfile(const char *filename, arraylist_t *arraylist, int verbose = 1, int *setcols = 0,
int *setrows = 0, int *setbits = 0);
const int NRAUTO = 0;
/** Write a list of arrays to file on disk
*
* @param filename Filename to use
* @param arraylist List of arrays to write
* @param mode Mode for the file with designs
* @param nrows If the list of arrays is empty, use this number of rows for the design file
 * @param ncols If the list of arrays is empty, use this number of columns for the design file
* @return Value zero if succesfull
*/
int writearrayfile (const char *filename, const arraylist_t &arraylist, arrayfile::arrayfilemode_t mode = arrayfile::ATEXT,
int nrows = NRAUTO, int ncols = NRAUTO);
/// Write a single array to file
int writearrayfile (const char *filename, const array_link &array, arrayfile::arrayfilemode_t mode = arrayfile::ATEXT);
/// Append a single array to an array file. creates a new file if no file exists
int append_arrayfile (const char *filename, const array_link array);
/// Make a selection of arrays from binary array file, append to list
void selectArrays (const std::string filename, std::vector< int > &idx, arraylist_t &fl, int verbose = 0);
/// Select a single array from a file
array_link selectArrays (std::string filename, int index);
#endif // FULLPACKAGE
/// Make a selection of arrays
arraylist_t selectArrays (const arraylist_t &input_list, std::vector< int > &idx);
/// Make a selection of arrays
arraylist_t selectArrays (const arraylist_t &input_list, std::vector< long > &idx);
/// Make a selection of arrays, append to list
void selectArrays (const arraylist_t &input_list, std::vector< int > &idx, arraylist_t &output_list);
/// Make a selection of arrays, append to list
void selectArrays (const arraylist_t &input_list, std::vector< long > &idx, arraylist_t &output_list);
/// From a container keep all elements with specified indices
template < class Container, class IntType > void keepElements (Container &al, std::vector< IntType > &idx) {
// iterate backwards so erasing an element does not shift the positions still to be visited;
// the explicit cast keeps the start value well-defined when idx is empty (size()-1 would wrap)
for (int jj = (int)idx.size () - 1; jj >= 0; jj--) {
if (!idx[jj]) {
al.erase (al.begin () + jj);
}
}
}
/// From a container remove all elements with specified indices
template < class Container, class IntType > void removeElements (Container &al, std::vector< IntType > &idx) {
// walk from the back so earlier erasures do not shift the remaining positions
for (int position = idx.size () - 1; position >= 0; position--) {
if (idx[position])
al.erase (al.begin () + position);
}
}
/// Make a selection of arrays from a list, append to list
template < class MType >
void selectArraysMask (const arraylist_t &al, std::vector< MType > &mask, arraylist_t &rl) {
myassert (al.size () == mask.size ());
// size_t index avoids the signed/unsigned comparison of the original int loop
for (size_t idx = 0; idx < al.size (); idx++) {
if (mask[idx]) {
rl.push_back (al.at (idx));
}
}
}
/// Append selection of arrays to existing list
template < class IndexType >
void appendArrays (const arraylist_t &al, const typename std::vector< IndexType > &idx, arraylist_t &lst) {
typedef typename std::vector< IndexType >::const_iterator index_iterator;
// append the arrays at the selected indices, in the order given by idx
for (index_iterator it = idx.begin (); it != idx.end (); ++it)
lst.push_back (al.at (*it));
}
/// Append set of arrays to existing list
void appendArrays(const arraylist_t &arrays_to_append, arraylist_t &dst);
/** Write a formatted array to stdout
 *
 * \param array Pointer to array data (stored in column-major order)
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \param width Field width used for each printed element
 */
template < class atype >
void write_array_format (const atype *array, const int nrows, const int ncols, int width = 3) {
int count;
for (int j = 0; j < nrows; j++) {
count = j;
for (int k = 0; k < ncols; k++) {
const char *s = (k < ncols - 1) ? " " : "\n";
// honor the width argument (previously hard-coded as %3i, leaving width unused);
// assumes myprintf accepts printf-style dynamic field widths -- TODO confirm
myprintf ("%*i%s", width, static_cast< int > (array[count]), s);
count += nrows;
}
}
#ifdef FULLPACKAGE
fflush (stdout);
setbuf (stdout, NULL);
#endif
}
/** @brief Write an array to a file pointer
 */
template < class atype > void write_array_format (FILE *fid, const atype *array, const int nrows, const int ncols) {
// data is stored column-major; walk a row by stepping nrows through the buffer
for (int row = 0; row < nrows; row++) {
int offset = row;
for (int col = 0; col < ncols; col++) {
const char *separator = (col < ncols - 1) ? " " : "\n";
fprintf (fid, "%3i%s", static_cast< int > (array[offset]), separator);
offset += nrows;
}
}
}
}
/// write an array in latex style
template < class atype >
void write_array_latex (std::ostream &ss, const atype *array, const int nrows, const int ncols) {
// column specification: one centered column per array column
ss << "\\begin{tabular}{";
for (int column = 0; column < ncols; column++)
ss << 'c';
ss << "}" << std::endl;
// data is stored column-major; step nrows through the buffer to traverse a row
for (int row = 0; row < nrows; row++) {
int offset = row;
for (int column = 0; column < ncols; column++) {
const char *separator = (column < ncols - 1) ? " & " : " \\\\ \n";
ss << array[offset] << separator;
offset += nrows;
}
}
ss << "\\end{tabular}" << std::endl;
}
/** Convert a file with arrays to a different format
*/
void convert_array_file(std::string input_filename, std::string output_filename, arrayfile::arrayfilemode_t output_format, int verbose = 0);
/// structure to write arrays to disk, thread safe
struct arraywriter_t {
public:
/** Pointers to different data files.
 *
 * Since depth_extend is a depth first approach we need to store arrays with a different number of columns
 **/
std::vector< arrayfile_t * > afiles;
/// only write arrays if this variable is true
bool writearrays;
/// number of arrays written to disk
int nwritten;
/// verbosity level
int verbose;
public:
arraywriter_t () {
writearrays = true;
verbose = 1;
};
~arraywriter_t () {
flush ();
closeafiles ();
}
/// flush all output files
void flush () {
for (size_t i = 0; i < afiles.size (); i++) {
arrayfile_t *af = afiles[i];
if (af != 0) {
// NOTE(review): the critical section covers only the updatenumbers() call,
// not the flush() below it -- confirm whether that is intended
#pragma omp critical
af->updatenumbers ();
af->flush ();
}
}
}
/// write a single array to disk
void writeArray (const array_link &A) {
// writing arrays with multiple threads at the same time is not supported
#ifdef DOOPENMP
#pragma omp critical
#endif
{
// the file to write to is selected by the number of columns of the array
int i = A.n_columns;
if (writearrays) {
if (i < (int)afiles.size () && i >= 0) {
afiles[i]->append_array (A);
} else {
fprintf (stderr, "depth_extend_t: writeArray: problem: array file for %d "
"columns was not opened\n",
(int)i);
}
// NOTE(review): nwritten is incremented even when no file was opened for this column count
nwritten++;
}
}
}
/// write a list of arrays to disk
void writeArray (const arraylist_t &lst) {
for (size_t j = 0; j < lst.size (); j++) {
const array_link &A = lst[j];
writeArray (A);
}
}
/// initialize the result files
void initArrayFiles (const arraydata_t &ad, int kstart, const std::string prefix,
arrayfilemode_t mode = ABINARY_DIFF) {
afiles.clear ();
afiles.resize (ad.ncols + 1);
nwritten = 0;
// open one output file for each number of columns from kstart up to ad.ncols
for (size_t i = kstart; i <= (size_t)ad.ncols; i++) {
arraydata_t ad0 (&ad, i);
std::string afile = prefix + "-" + ad0.idstr () + ".oa";
if (verbose >= 3)
myprintf ("depth_extend_t: creating output file %s\n", afile.c_str ());
int nb = arrayfile_t::arrayNbits (ad);
afiles[i] = new arrayfile_t (afile, ad.N, i, -1, mode, nb);
}
}
/// return the total number arrays written to disk
int nArraysWritten () const { return nwritten; }
public:
/// close and delete all open array files
void closeafiles () {
for (size_t i = 0; i < afiles.size (); i++) {
delete afiles[i];
}
afiles.clear ();
}
};
/** Read header for binary data file. Return true if valid header file
*
* The header consists of 4 integers: 2 magic numbers, then the number of rows and columns
*/
bool readbinheader(FILE *fid, int &nr, int &nc);
/// Write header for binary data file
void writebinheader(FILE *fid, int number_rows, int number_columns);
/// Write a vector of numeric elements to binary file as double values
template < class Type >
void vector2doublebinfile (const std::string fname, std::vector< Type > vals, int writeheader = 1) {
FILE *fid = fopen (fname.c_str (), "wb");
if (fid == 0) {
fprintf (stderr, "doublevector2binfile: error with file %s\n", fname.c_str ());
throw_runtime_exception("doublevector2binfile: error with file");
}
if (writeheader) {
writebinheader (fid, vals.size (), 1);
}
for (unsigned int i = 0; i < vals.size (); i++) {
double x = vals[i];
fwrite (&x, sizeof (double), 1, fid);
}
fclose (fid);
}
/// Write a vector of vector elements to binary file
void vectorvector2binfile(const std::string fname, const std::vector< std::vector< double > > vals,
int writeheader, int na);
/** Convert 2-level array to main effects in Eigen format
*
* \param array Array to convert
* \param intercept If True, then include the intercept
* \returns The main effects model
*/
MatrixFloat array2eigenX1 (const array_link &array, int intercept = 1);
/** Convert 2-level array to second order interaction matrix in Eigen format
*
* The intercept and main effects are not included.
*
* \param array Array to convert
* \returns The second order interaction model
*/
MatrixFloat array2eigenX2 (const array_link &array);
/** Convert 2-level array to second order interaction model matrix (intercept, main effects, interaction effects)
*
* \param array Design of which to calculate the model matrix
* \returns Eigen matrix with the model matrix
*/
MatrixFloat array2eigenModelMatrix (const array_link &array);
/** Create first and second order model matrix for mixed-level orthogonal array
*
* \param array Input array
* \param verbose Verbosity level
* \returns Pair with main effects and two-factor interaction model
*
* For 2-level arrays a direct calculation is used. For mixel-level arrays Helmert contrasts are used.
*/
std::pair< MatrixFloat, MatrixFloat > array2eigenModelMatrixMixed (const array_link &array, int verbose = 1);
/** Calculate number of parameters in the model matrix
*
* A list of integers is returned, with the number of columns in:
*
* - The intercept (always 1)
* - The main effects
* - The interaction effects (second order interaction terms without quadratics)
* - The quadratic effects
*
* \param array Orthogonal array or conference design
* \param order Not used any more
* \returns List of sizes
*/
std::vector< int > numberModelParams(const array_link &array, int order = -1);
/** return index of specified array in a file. returns -1 if array is not found
*
* \param array Array to find
* \param array_file Location if file with arrays
* \param verbose Verbosity level
* \returns Position of array in list
*/
int arrayInFile (const array_link &array, const char *array_file, int verbose = 1);
/** return index of specified array in a list. returns -1 if array is not found
*
* \param array Array to find
* \param arrays List of arrays
* \param verbose Verbosity level
* \returns Position of array in list
*/
int arrayInList (const array_link &array, const arraylist_t &arrays, int verbose = 1);
|
sparse_matrix.h | #ifndef SPARSE_MATRIX_H
#define SPARSE_MATRIX_H
// headers {{{
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <vector>
#include <cmath>
#include <cstddef>
#include <assert.h>
#include <omp.h>
#include <iostream>
#ifdef _MSC_VER
#if _MSC_VER >= 1600
#include <cstdint>
#else
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
#endif
#endif
#if __cplusplus >= 201103L || (defined(_MSC_VER) && (_MSC_VER >= 1500)) // Visual Studio 2008
#define CPP11
#endif
/* random number generator: simulate the interface of the python random module */
#include <limits>
#if defined(CPP11)
#include <random>
// C++11 random engine wrapper mimicking the interface of Python's random module.
template<typename engine_t=std::mt19937>
struct random_number_generator : public engine_t { // {{{
	typedef typename engine_t::result_type result_type;
	random_number_generator(unsigned seed=0): engine_t(seed){ }
	// Random integer in [0, end).  NOTE(review): plain modulo, so the result is
	// slightly biased unless end evenly divides the engine's range.
	result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; }
	// Uniform real in [start, end).
	template<class T=double, class T2=double> T uniform(T start=0.0, T2 end=1.0) {
		return std::uniform_real_distribution<T>(start, (T)end)(*this);
	}
	// Normal deviate with the given mean and standard deviation.
	template<class T=double> T normal(T mean=0.0, T stddev=1.0) {
		return std::normal_distribution<T>(mean, stddev)(*this);
	}
	// Random integer in the closed interval [start, end].
	template<class T=int, class T2=T> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) {
		return std::uniform_int_distribution<T>(start, end)(*this);
	}
	// Shuffle [first, last) using this engine.
	template<class RandIter> void shuffle(RandIter first, RandIter last) {
		std::shuffle(first, last, *this);
	}
};
#else
#include <tr1/random>
// TR1 fallback providing the same interface as the C++11 wrapper above.
template<typename engine_t=std::tr1::mt19937>
struct random_number_generator : public engine_t {
	typedef typename engine_t::result_type result_type;
	random_number_generator(unsigned seed=0): engine_t(seed) { }
	// Raw engine draw.
	result_type operator()() { return engine_t::operator()(); }
	// Draw in [0, n); this overload is what std::random_shuffle calls.
	result_type operator()(result_type n) { return randint(result_type(0), result_type(n-1)); }
	// Random integer in [0, end).  NOTE(review): plain modulo introduces a slight bias.
	result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; }
	// Uniform real in [start, end).
	template<class T, class T2> T uniform(T start=0.0, T2 end=1.0) {
		typedef std::tr1::uniform_real<T> dist_t;
		return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,(T)end))();
	}
	// Normal deviate with the given mean and standard deviation.
	template<class T, class T2> T normal(T mean=0.0, T2 stddev=1.0) {
		typedef std::tr1::normal_distribution<T> dist_t;
		return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(mean, (T)stddev))();
	}
	// Random integer in the closed interval [start, end].
	template<class T, class T2> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) {
		typedef std::tr1::uniform_int<T> dist_t;
		return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,end))();
	}
	// Shuffle [first, last) using this engine via operator()(n).
	template<class RandIter> void shuffle(RandIter first, RandIter last) {
		std::random_shuffle(first, last, *this);
	}
}; // }}}
#endif
typedef random_number_generator<> rng_t;
// Generate a random permutation of {0, ..., size-1} and its inverse.
// perm is filled with the identity, shuffled by an rng_t seeded with `seed`,
// and inv_perm is built so that inv_perm[perm[i]] == i for every i.
template<typename T>
void gen_permutation_pair(size_t size, std::vector<T> &perm, std::vector<T> &inv_perm, int seed=0) { // {{{
	perm.resize(size);
	inv_perm.resize(size);
	size_t pos = 0;
	for(typename std::vector<T>::iterator it = perm.begin(); it != perm.end(); ++it) {
		*it = pos;
		pos++;
	}
	rng_t rng(seed);
	rng.shuffle(perm.begin(), perm.end());
	for(size_t j = 0; j < size; j++)
		inv_perm[perm[j]] = j;
} // }}}
//#include "zlib_util.h"
// }}}
#define MALLOC(type, size) (type*)malloc(sizeof(type)*(size))
#define CALLOC(type, size) (type*)calloc((size), sizeof(type))
#define REALLOC(ptr, type, size) (type*)realloc((ptr), sizeof(type)*(size))
//namespace rofu {
typedef unsigned major_t;
const major_t ROWMAJOR = 0U;
const major_t COLMAJOR = 1U;
const major_t default_major = COLMAJOR;
// Zip Iterator
// Common usage: std::sort(zip_iter(A.begin(),B.begin()), zip_iter(A.end(),B.end()));
template<class T1, class T2> struct zip_body;
template<class T1, class T2> struct zip_ref;
template<class IterT1, class IterT2> struct zip_it;
template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y);
#define dvec_t dense_vector
template<typename val_type> class dvec_t;
#define dmat_t dense_matrix
template<typename val_type> class dmat_t;
#define smat_t sparse_matrix
template<typename val_type> class smat_t;
#define eye_t identity_matrix
template<typename val_type> class eye_t;
#define gmat_t general_matrix
// Abstract base for all matrix types: carries only the shape plus
// RTTI-style kind queries and assert-checked downcasts to the concrete
// sparse/dense classes.
template<typename val_type> class gmat_t { // {{{
	public:
		size_t rows, cols;
		gmat_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols){}
		virtual bool is_sparse() const {return false;}
		virtual bool is_dense() const {return false;}
		virtual bool is_identity() const {return false;}
		// Checked downcasts: assert the dynamic kind, then static_cast.
		smat_t<val_type>& get_sparse() {assert(is_sparse()); return static_cast<smat_t<val_type>&>(*this);}
		const smat_t<val_type>& get_sparse() const {assert(is_sparse()); return static_cast<const smat_t<val_type>&>(*this);}
		dmat_t<val_type>& get_dense() {assert(is_dense()); return static_cast<dmat_t<val_type>&>(*this);}
		const dmat_t<val_type>& get_dense() const {assert(is_dense()); return static_cast<const dmat_t<val_type>&>(*this);}
}; // }}}
template<typename val_type> class entry_iterator_t; // iterator for files with (i,j,v) tuples
template<typename val_type> class smat_iterator_t; // iterator for nonzero entries in smat_t
template<typename val_type> class smat_subset_iterator_t; // iterator for nonzero entries in a subset
template<typename val_type> class dmat_iterator_t; // iterator for nonzero entries in dmat_t
// H = X*W, (X: m*n, W: n*k row-major, H m*k row major)
template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const val_type* W, const size_t k, val_type *H);
template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H);
template<typename val_type> void gmat_x_dmat(const gmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H);
// H = a*X*W + H0, (X: m*n, W: n*k row-major, H m*k row major)
template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H);
template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
// H = a*X*W + b*H0, (X: m*n, W: n*k row-major, H m*k row major)
template<typename val_type, typename T2, typename T3>
void smat_x_dmat(T2 a, const smat_t<val_type>& X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H);
template<typename val_type, typename T2, typename T3>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
template<typename val_type, typename T2, typename T3>
void gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H);
// trace(W'*X*H)
template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H);
// Dense Vector
// Dense vector over a raw buffer.
//
// A dvec_t is either a *body* (owns its buffer, mem_alloc_by_me == true) or a
// *view* (aliases memory owned elsewhere).  Views copy shallowly, bodies copy
// deeply.  Allocation failures are not checked, consistent with the rest of
// this header.
template<typename val_type>
class dvec_t { // {{{
	friend class dmat_t<val_type>;
	private:
		bool mem_alloc_by_me; // true iff buf is owned (and freed) by this object
		void zero_init() {len = 0; buf = NULL; mem_alloc_by_me = false;}
	public:
		size_t len;    // number of elements
		val_type *buf; // element storage (owned iff mem_alloc_by_me)
		// Default Constructor: empty view.
		dvec_t() {zero_init();}
		// Copy Constructor: delegates to copy assignment.
		dvec_t(const dvec_t& v) { // {{{
			zero_init();
			*this = v;
		} // }}}
		// Copy Assignment: view -> shallow copy (this aliases other's buffer),
		// body -> deep copy.
		// NOTE(review): the view branch memcpy's the whole object; this relies
		// on dvec_t being trivially copyable.
		dvec_t& operator=(const dvec_t& other) { // {{{
			if(this == &other) return *this;
			if(other.is_view()) { // view to view copy
				if(mem_alloc_by_me) clear_space();
				memcpy(this, &other, sizeof(dvec_t));
			} else { // deep to deep copy
				resize(other.size());
				memcpy(buf, other.buf, sizeof(val_type)*len);
			}
			return *this;
		} // }}}
		// View Constructor: wraps an existing buffer, or allocates a
		// zero-filled body when buf == NULL.
		explicit dvec_t(size_t len, val_type *buf=NULL): len(len), buf(buf), mem_alloc_by_me(false) { // {{{
			if(buf == NULL && len != 0) {
				this->buf = MALLOC(val_type, len);
				memset(this->buf, 0, sizeof(val_type)*len);
				mem_alloc_by_me = true;
			}
		} // }}}
		// Fill Constructor: len copies of x.
		explicit dvec_t(size_t len, const val_type &x) {zero_init();resize(len,x);}
		// dense_matrix Converter: view of a matrix view, deep copy of a body.
		dvec_t(const dmat_t<val_type>& m) { // {{{
			zero_init();
			if(m.is_view()) {len=m.rows*m.cols; buf=m.buf;}
			else {
				resize(m.rows*m.cols);
				memcpy(buf, m.buf, sizeof(val_type)*len);
			}
		} // }}}
#if defined(CPP11)
		// Move Constructor
		dvec_t(dvec_t&& m){
			zero_init(); *this = std::move(m);}
		// Move Assignment: steals other's buffer and resets other.
		dvec_t& operator=(dvec_t&& other) { // {{{
			if(this == &other) return *this;
			clear_space();
			memcpy(this, &other, sizeof(dvec_t));
			other.zero_init();
			return *this;
		} // }}}
#endif
		~dvec_t() {clear_space(); }
		bool is_view() const {return mem_alloc_by_me==false;}
		void clear_space() {if(mem_alloc_by_me) free(buf); zero_init();}
		// Shallow view of this vector (never owns memory).
		dvec_t get_view() const {return dvec_t(len, buf);}
		// Turn a view into a body by copying the referenced data.
		dvec_t& grow_body() { // {{{
			if(is_view()) {
				dvec_t tmp_view = *this;
				this->resize(len);
				memcpy(buf, tmp_view.buf, sizeof(val_type)*len);
			}
			return *this;
		} // }}}
		// this = other (element-wise); sizes must match.
		dvec_t& assign(const dvec_t& other) { // {{{
			assert(len == other.len);
			return assign((val_type)1.0, other);
		} // }}}
		// this = a * other (element-wise); sizes must match.
		template<typename T>
		dvec_t& assign(T a, const dvec_t& other) { // {{{
			assert(len == other.len);
			if(a == T(0))
				memset(buf, 0, sizeof(val_type)*len);
			else if(a == T(1)) {
				if(this == &other)
					return *this;
#pragma omp parallel for schedule(static)
				for(size_t idx = 0; idx < len; idx++)
					at(idx) = other.at(idx);
			} else {
#pragma omp parallel for schedule(static)
				for(size_t idx = 0; idx < len; idx++)
					at(idx) = a*other.at(idx);
			}
			return *this;
		} // }}}
		size_t size() const {return len;};
		// Resize and fill every element with x.
		void resize(size_t len_, const val_type &x) { // {{{
			resize(len_);
			for(size_t i = 0; i < len; i++)
				buf[i] = x;
		} // }}}
		// Resize to len_ elements; contents are unspecified.
		// NOTE(review): realloc overwrites buf directly, so the old buffer
		// leaks on allocation failure -- OOM is not handled anywhere in this
		// header, so this is left as-is.
		void resize(size_t len_) { // {{{
			if(mem_alloc_by_me)
				buf = REALLOC(buf, val_type, len_);
			else
				buf = MALLOC(val_type, len_);
			mem_alloc_by_me = true;
			len = len_;
		} // }}}
		val_type& at(size_t idx) {return buf[idx];}
		const val_type& at(size_t idx) const {return buf[idx];}
		val_type& operator[](size_t idx) {return buf[idx];}
		const val_type& operator[](size_t idx) const {return buf[idx];}
		val_type* data() {return buf;}
		const val_type* data() const {return buf;}
		// Debug dump to stdout.
		void print(const char *str="") const {
			// %zu fixes the old "%d with size_t" mismatch (undefined behavior);
			// the casts keep the variadic arguments in sync with the format.
			printf("%s dvec_t: len %zu, is_view %d, buf %p\n", str, len, (int)is_view(), (void*)buf);
			for(size_t i = 0; i < len; i ++)
				printf("%g ", (double)buf[i]);
			puts("");
		}
}; // }}}
// Dense Matrix
// Dense matrix stored in one contiguous buffer, row-major or column-major.
// Like dvec_t, an instance is a *view* (aliasing foreign memory) or a *body*
// (owning buf).  vec_set caches a dvec_t view per row (row-major) or per
// column (column-major) so operator[] yields a vector.
template<typename val_type>
class dmat_t : public gmat_t<val_type> { // {{{
	friend class dvec_t<val_type>;
	public:
		// size_t rows, cols; inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		val_type *buf; // element storage (owned iff mem_alloc_by_me)
		// m x n matrix with entries drawn uniformly from [lower, upper).
		static dmat_t rand(rng_t &rng, size_t m, size_t n, double lower=0.0, double upper=1.0, major_t major_type_=default_major) { // {{{
			dmat_t ret(m, n, major_type_);
			if(lower >= upper) lower = upper;
			for(size_t idx = 0; idx < m*n; idx++)
				ret.buf[idx] = (val_type)rng.uniform(lower, upper);
			return ret;
		} // }}}
		// m x n matrix with entries drawn from N(mean, std^2).
		static dmat_t randn(rng_t &rng, size_t m, size_t n, double mean=0.0, double std=1.0, major_t major_type_=default_major) { // {{{
			dmat_t ret(m, n, major_type_);
			for(size_t idx = 0; idx < m*n; idx++)
				ret.buf[idx] = (val_type)rng.normal(mean, std);
			return ret;
		} // }}}
	private:
		bool mem_alloc_by_me; // true iff buf is owned by this object
		major_t major_type;
		typedef dvec_t<val_type> vec_t;
		std::vector<vec_t> vec_set; // view for each row/col depending on the major_type;
		void zero_init() {rows=cols=0; buf=NULL; major_type=default_major; mem_alloc_by_me=false; vec_set.clear();}
		// Rebuild the cached row/column views after buf or the shape changes.
		void init_vec_set() { // {{{
			if(is_rowmajor()) {
				vec_set.resize(rows);
				for(size_t r = 0; r < rows; r++)
					vec_set[r] = dvec_t<val_type>(cols, &buf[r*cols]);
			} else {
				vec_set.resize(cols);
				for(size_t c = 0; c < cols; c++)
					vec_set[c] = dvec_t<val_type>(rows, &buf[c*rows]);
			}
		} // }}}
		// Switch the storage order (row- <-> column-major) while preserving the
		// logical matrix: vectors just flip the flag, square bodies transpose
		// in place, everything else goes through a temporary copy.
		void inv_major() { // {{{
			if(rows == 1 || cols == 1) {
				major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
				init_vec_set();
			} else if(rows == cols && !is_view()) { // inplace for square matrix
				for(size_t r = 0; r < rows; r++)
					for(size_t c = 0; c < r; c++)
						std::swap(at(r,c),at(c,r));
				major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
			} else {
				dmat_t tmp(*this);
				major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
				resize(rows,cols);
				for(size_t r = 0; r < rows; r++)
					for(size_t c = 0; c < cols; c++)
						at(r,c) = tmp.at(r,c);
			}
		} // }}}
	public:
		// Default Constructor
		dmat_t() {zero_init();}
		// Copy Constructor; converts storage order when major_type_ differs.
		dmat_t(const dmat_t& other, major_t major_type_=default_major) { // {{{
			zero_init();
			if(other.major_type == major_type_)
				*this = other;
			else { // deep copy is required when major_type changes
				major_type = major_type_;
				resize(other.rows, other.cols);
				for(size_t r = 0; r < rows; r++)
					for(size_t c = 0; c < cols; c++)
						at(r,c) = other.at(r,c);
			}
		} // }}}
		// Copy Assignment: view -> shallow copy, body -> deep copy.
		dmat_t& operator=(const dmat_t& other) { // {{{
			if(this == &other) return *this;
			if(other.is_view()) { // for view
				if(mem_alloc_by_me) clear_space();
				rows = other.rows; cols = other.cols; buf = other.buf; major_type = other.major_type;
				init_vec_set();
				mem_alloc_by_me = false;
			} else { // deep copy
				if(is_view() || rows!=other.rows || cols!=other.cols || major_type!=other.major_type) {
					major_type = other.major_type;
					resize(other.rows, other.cols);
				}
				memcpy(buf, other.buf, sizeof(val_type)*rows*cols);
			}
			return *this;
		} // }}}
		// Size Constructor: allocates a zero-filled rows_ x cols_ body.
		explicit dmat_t(size_t rows_, size_t cols_, major_t major_type=default_major): gmat_t<val_type>(rows_,cols_), buf(NULL), mem_alloc_by_me(false), major_type(major_type) { // {{{
			resize(rows,cols);
			memset(this->buf, 0, sizeof(val_type)*rows*cols);
		} // }}}
		// View Constructor: wraps an existing buffer without taking ownership.
		explicit dmat_t(size_t rows_, size_t cols_, val_type *buf, major_t major_type_): gmat_t<val_type>(rows_,cols_), buf(buf), mem_alloc_by_me(false), major_type(major_type_) { // {{{
			init_vec_set();
		} // }}}
		// Fill Constructor: nr_copy stacked copies of vector v.
		explicit dmat_t(size_t nr_copy, const dvec_t<val_type>& v, major_t major_type_=default_major) { // {{{
			zero_init();
			major_type = major_type_;
			resize(nr_copy, v);
		} // }}}
		// dense_vector Converter: a view vector becomes a 1 x n / n x 1 view,
		// a body vector is deep-copied.
		dmat_t(const dvec_t<val_type>& v, major_t major_type_=default_major) { // {{{
			zero_init();
			major_type = major_type_;
			if(!v.is_view())
				resize(1, v);
			else {
				rows = is_rowmajor()? 1: v.size();
				cols = is_colmajor()? 1: v.size();
				buf = v.buf;
				init_vec_set();
			}
		} // }}}
		// sparse_matrix Converter: densifies sm (missing entries become 0).
		template<typename T>
		dmat_t(const smat_t<T>& sm, major_t major_type_=default_major) { // {{{
			zero_init();
			major_type = major_type_;
			resize(sm.rows, sm.cols);
			memset(buf, 0, sizeof(val_type)*rows*cols);
			for(size_t i = 0; i < sm.rows; i++)
				for(size_t idx = sm.row_ptr[i]; idx != sm.row_ptr[i+1]; idx++)
					at(i, sm.col_idx[idx]) = sm.val_t[idx];
		} // }}}
		// identity_matrix Converter.
		template<typename T>
		dmat_t(const eye_t<T>& eye, major_t major_type_=default_major) { // {{{
			zero_init();
			major_type = major_type_;
			resize(eye.rows, eye.cols);
			memset(buf, 0, sizeof(val_type)*rows*cols);
			for(size_t i = 0; i < rows; i++)
				at(i,i) = 1;
		} // }}}
#if defined(CPP11)
		// Move Constructor
		dmat_t(dmat_t&& m){
			zero_init();
			*this = std::move(m);
		}
		// Move Assignment: steals other's buffer and view cache.
		dmat_t& operator=(dmat_t&& other) { // {{{
			if(this == &other) return *this;
			clear_space();
			rows = other.rows;
			cols = other.cols;
			buf = other.buf;
			vec_set = std::move(other.vec_set);
			mem_alloc_by_me = other.mem_alloc_by_me;
			major_type = other.major_type;
			other.zero_init();
			return *this;
		} // }}}
#endif
		// Destructor.  (A leftover debug loop that overwrote the owned buffer
		// with -1 before freeing was removed; it only cost time and had no
		// observable effect.)
		~dmat_t() {clear_space();}
		bool is_view() const {return mem_alloc_by_me==false;}
		bool is_dense() const {return true;}
		bool is_rowmajor() const {return major_type==ROWMAJOR;}
		bool is_colmajor() const {return major_type==COLMAJOR;}
		void clear_space() {if(mem_alloc_by_me) free(buf); zero_init();}
		// Shallow view of this matrix (never owns memory).
		dmat_t get_view() const {return dmat_t(rows,cols,buf,major_type);}
		// Turn a view into a body by copying the referenced data.
		dmat_t& grow_body() { // {{{
			if(is_view()) {
				dmat_t tmp_view = *this;
				this->resize(rows,cols);
				memcpy(buf, tmp_view.buf, sizeof(val_type)*rows*cols);
			}
			return *this;
		} // }}}
		// Transposed *view*; shares the underlying buffer.
		dmat_t transpose() const { // {{{
			dmat_t ret = get_view();
			ret.to_transpose();
			return ret;
		} // }}}
		// In-place functions
		// this = other (element-wise).
		dmat_t& assign(const dmat_t& other) { // {{{
			return assign((val_type)1.0, other);
		} // }}}
		// this = a * other (element-wise); iterates in this matrix's storage
		// order for cache locality.
		template<typename T>
		dmat_t& assign(T a, const dmat_t& other) { // {{{
			if(a == T(0))
				memset(buf, 0, sizeof(val_type)*rows*cols);
			else if(a == T(1)) {
				if(this == &other)
					return *this;
				if(is_rowmajor()) {
#pragma omp parallel for schedule(static)
					for(size_t r = 0; r < rows; r++)
						for(size_t c = 0; c < cols; c++)
							at(r,c) = other.at(r,c);
				} else {
#pragma omp parallel for schedule (static)
					for(size_t c = 0; c < cols; c++)
						for(size_t r = 0; r < rows; r++)
							at(r,c) = other.at(r,c);
				}
			} else {
				if(is_rowmajor()) {
#pragma omp parallel for schedule(static)
					for(size_t r = 0; r < rows; r++)
						for(size_t c = 0; c < cols; c++)
							at(r,c) = a*other.at(r,c);
				} else {
#pragma omp parallel for schedule(static)
					for(size_t c = 0; c < cols; c++)
						for(size_t r = 0; r < rows; r++)
							at(r,c) = a*other.at(r,c);
				}
			}
			return *this;
		} // }}}
		// Logical transpose without moving data: swap dims, flip the major flag.
		dmat_t& to_transpose() { // {{{
			std::swap(rows,cols);
			major_type = is_rowmajor()? COLMAJOR: ROWMAJOR;
			init_vec_set();
			return *this;
		} // }}}
		dmat_t& to_rowmajor() {if(is_colmajor()) inv_major(); return *this;}
		dmat_t& to_colmajor() {if(is_rowmajor()) inv_major(); return *this;}
		// Permute rows/cols; a permutation whose length does not match the
		// dimension is treated as the identity.
		dmat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { // {{{
			return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0] : NULL);
		} // }}}
		// Permute rows/cols in place; NULL means identity for that dimension.
		dmat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { // {{{
			dmat_t tmp(*this);
			resize(rows,cols);
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					at(r,c) = tmp.at(row_perm? row_perm[r]: r, col_perm? col_perm[c]: c);
			return *this;
		} // }}}
		// IO methods
		// Load (size_t rows, size_t cols, double data[rows*cols], row-major);
		// on open failure a message is printed and the matrix is untouched.
		void load_from_binary(const char *filename, major_t major_type_=default_major) { // {{{
			FILE *fp = fopen(filename, "rb");
			if(fp == NULL) {
				fprintf(stderr, "Error: can't read the file (%s)!!\n", filename);
				return;
			}
			load_from_binary(fp, major_type_, filename);
			fclose(fp);
		} // }}}
		void load_from_binary(FILE *fp, major_t major_type_=default_major, const char *filename=NULL) { // {{{
			clear_space();
			zero_init();
			size_t rows_, cols_;
			if(fread(&rows_, sizeof(size_t), 1, fp) != 1)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			if(fread(&cols_, sizeof(size_t), 1, fp) != 1)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			std::vector<double> tmp(rows_*cols_);
			if(fread(&tmp[0], sizeof(double), rows_*cols_, fp) != rows_*cols_)
				fprintf(stderr, "Error: wrong input stream in %s.\n", filename);
			// The file is always row-major double; copy through a typed view so
			// the requested storage order and value type are produced.
			dmat_t<double> tmp_view(rows_, cols_, &tmp[0], ROWMAJOR);
			major_type = major_type_;
			resize(rows_, cols_);
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					at(r,c) = tmp_view.at(r,c);
		} // }}}
		void save_binary_to_file(const char *filename) { // {{{
			FILE *fp = fopen(filename, "wb");
			if(fp == NULL) {
				fprintf(stderr,"Error: can't open file %s\n", filename);
				exit(1);
			}
			save_binary_to_file(fp);
			fclose(fp);
		} // }}}
		// Write (rows, cols, row-major double data) -- the format that
		// load_from_binary expects.
		void save_binary_to_file(FILE *fp) { // {{{
			fwrite(&rows, sizeof(size_t), 1, fp);
			fwrite(&cols, sizeof(size_t), 1, fp);
			std::vector<double> tmp(rows*cols);
			size_t idx = 0;
			for(size_t r = 0; r < rows; r++)
				for(size_t c = 0; c < cols; c++)
					tmp[idx++] = (double)at(r,c);
			fwrite(&tmp[0], sizeof(double), tmp.size(), fp);
		} // }}}
		size_t size() const {return rows;}
		// Resize to nr_copy stacked copies of v (as rows if row-major,
		// as columns otherwise).
		void resize(size_t nr_copy, const vec_t &v) { // {{{
			if(is_rowmajor()) {
				size_t rows_ = nr_copy, cols_ = v.size();
				resize(rows_, cols_);
				size_t unit = sizeof(val_type)*v.size();
				for(size_t r = 0; r < rows; r++)
					memcpy(vec_set[r].data(),v.data(),unit);
			} else {
				size_t rows_ = v.size(), cols_ = nr_copy;
				resize(rows_, cols_);
				size_t unit = sizeof(val_type)*v.size();
				for(size_t c = 0; c < cols; c++)
					memcpy(vec_set[c].data(),v.data(),unit);
			}
		} // }}}
		// Resize the body to rows_ x cols_; contents are unspecified.
		void resize(size_t rows_, size_t cols_) { // {{{
			if(mem_alloc_by_me) {
				if(rows_*cols_ != rows*cols)
					buf = (val_type*) realloc(buf, sizeof(val_type)*rows_*cols_);
			} else {
				buf = (val_type*) malloc(sizeof(val_type)*rows_*cols_);
			}
			mem_alloc_by_me = true;
			rows = rows_; cols = cols_;
			init_vec_set();
		} // }}}
		// Reshape when the element count allows it, otherwise reallocate.
		// NOTE(review): ROWMAJOR == 0, so passing major_type_=ROWMAJOR is
		// indistinguishable from "keep current order" -- confirm no caller
		// relies on forcing ROWMAJOR through this path.
		dmat_t& lazy_resize(size_t rows_, size_t cols_, major_t major_type_=0) { // {{{
			if(is_view() && rows_*cols_==rows*cols &&
					(major_type_ == 0 || major_type==major_type_))
				reshape(rows_,cols_);
			else {
				if(major_type_!=0) major_type = major_type_;
				resize(rows_, cols_);
			}
			return *this;
		} // }}}
		// Reinterpret the buffer with a new shape; element count must not change.
		dmat_t& reshape(size_t rows_, size_t cols_) { // {{{
			assert(rows_*cols_ == rows*cols);
			if(rows_ != rows || cols_ != cols) { // fixed: was "cols != cols" (always false)
				rows = rows_; cols = cols_;
				init_vec_set();
			}
			return *this;
		} // }}}
		inline val_type& at(size_t r, size_t c) {return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r];}
		inline const val_type& at(size_t r, size_t c) const {return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r];}
		vec_t& operator[](size_t idx) {return vec_set[idx];}
		const vec_t& operator[](size_t idx) const {return vec_set[idx];}
		val_type* data() {return buf;}
		const val_type* data() const {return buf;}
		// Debug dump.
		void print_mat(const char *str="", FILE *fp=stdout) const { // {{{
			fprintf(fp, "===>%s<===\n", str);
			// %zu fixes the old %ld-with-size_t mismatch; casts match the varargs.
			fprintf(fp, "rows %zu cols %zu mem_alloc_by_me %d row_major %d buf %p\n",
					rows, cols, (int)mem_alloc_by_me, (int)is_rowmajor(), (void*)buf);
			for(size_t r = 0; r < rows; r++) {
				for(size_t c = 0; c < cols; c++)
					fprintf(fp, "%g ", (double)at(r,c));
				fprintf(fp, "\n");
			}
		} // }}}
}; // }}}
// Identity Matrix
// Identity matrix: a square gmat_t marker type that carries no storage of
// its own; converters in dmat_t/smat_t materialize the actual entries.
template<typename val_type>
class eye_t : public gmat_t<val_type> { // {{{
	public:
		// size_t rows, cols; inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		// Square by construction: both dimensions are rows_.
		eye_t(size_t rows_ = 0): gmat_t<val_type>(rows_, rows_) {}
		bool is_identity() const {return true;}
}; // }}}
// Sparse matrix format CSC & CSR
// Sparse matrix kept simultaneously in CSR (row_ptr/col_idx/val_t) and CSC
// (col_ptr/row_idx/val) form.  Like the dense types, an instance is either a
// *view* (shallow copies) or a *body* (owns all index/value arrays).
template<typename val_type>
class smat_t : public gmat_t<val_type> { // {{{
	private:
		bool mem_alloc_by_me;  // true iff the CSR/CSC arrays are owned
		bool read_from_binary; // true iff the arrays live inside binary_buf
		unsigned char* binary_buf;
		size_t binary_buf_len;
		const static int HeaderSize =
			sizeof(size_t)+sizeof(size_t)+sizeof(size_t)+sizeof(size_t);
		void zero_init();
		void allocate_space(size_t rows_, size_t cols_, size_t nnz_);
		void csr_to_csc();
		void csc_to_csr();
		void update_max_nnz();
	public: // static methods
		// Random sparse matrix with ~m*n*sparsity nonzeros uniform in [lower, upper).
		static smat_t rand(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double lower=0.0, double upper=1.0) { // {{{
			if(lower > upper) lower = upper;
			smat_t ret;
			size_t nnz_ = (size_t)(m*n*sparsity);
			ret.allocate_space(m, n, nnz_);
			for(size_t idx = 0; idx < nnz_; idx++) {
				ret.val_t[idx] = rng.uniform(lower, upper);
				ret.col_idx[idx] = rng.randint(0, n-1);
				ret.row_ptr[rng.randint(1, m)] += 1;
			}
			for(size_t i = 1; i <= m; i++)
				ret.row_ptr[i] += ret.row_ptr[i-1];
			ret.csr_to_csc();
			ret.update_max_nnz();
			return ret;
		} // }}}
		// Random sparse matrix with ~m*n*sparsity nonzeros from N(mean, std^2).
		static smat_t randn(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double mean=0.0, double std=1.0) { // {{{
			smat_t ret;
			size_t nnz_ = (size_t)(m*n*sparsity);
			ret.allocate_space(m, n, nnz_);
			for(size_t idx = 0; idx < nnz_; idx++) {
				ret.val_t[idx] = (val_type)rng.normal(mean, std);
				ret.col_idx[idx] = rng.randint(0, n-1);
				ret.row_ptr[rng.randint(1,m)] += 1;
			}
			for(size_t i = 1; i <= m; i++)
				ret.row_ptr[i] += ret.row_ptr[i-1];
			ret.csr_to_csc();
			ret.update_max_nnz();
			return ret;
		} // }}}
	public:
		//size_t rows, cols; // inherited from gmat_t
		using gmat_t<val_type>::rows;
		using gmat_t<val_type>::cols;
		size_t nnz, max_row_nnz, max_col_nnz;
		val_type *val, *val_t;       // CSC values, CSR values
		size_t *col_ptr, *row_ptr;   // CSC column pointers, CSR row pointers
		unsigned *row_idx, *col_idx; // CSC row indices, CSR column indices
		// filetypes for loading smat_t
		enum format_t {TXT=0, PETSc=1, BINARY=2, COMPRESSION=3};
		// Default Constructor
		smat_t() {zero_init();}
		// Copy Constructor
		smat_t(const smat_t& m) {zero_init(); *this = m;}
		// dense_matrix Converter: keeps only the nonzero entries of m.
		smat_t(const dmat_t<val_type>& m) { // {{{
			zero_init();
			dmat_iterator_t<val_type> entry_it(m);
			load_from_iterator(m.rows, m.cols, entry_it.get_nnz(), &entry_it);
		} //}}}
		// identity_matrix Converter.
		// fixed: allocate_space() used to be called with nnz 0 even though
		// eye.rows entries are written below (overflowing the value/index
		// arrays); an n x n identity has exactly n nonzeros.
		smat_t(const eye_t<val_type>& eye) { // {{{
			zero_init();
			allocate_space(eye.rows, eye.cols, eye.rows);
			for(size_t i = 0; i < eye.rows; i++) {
				row_ptr[i+1] = i+1;
				col_idx[i] = i;
				val_t[i] = (val_type)1;
			}
			for(size_t j = 0; j < eye.cols; j++) {
				col_ptr[j+1] = j+1;
				row_idx[j] = j;
				val[j] = (val_type)1;
			}
		} // }}}
		// Size Constructor: rows_ x cols_ with room for nnz_ entries.
		smat_t(size_t rows_, size_t cols_, size_t nnz_=0){ // {{{
			zero_init();
			allocate_space(rows_, cols_, nnz_);
		} // }}}
		// Copy Assignment: view -> shallow copy, body -> deep copy.
		// NOTE(review): the shallow branch memcpy's the whole object (vtable
		// pointer included); safe only because both sides are the same type.
		smat_t& operator=(const smat_t& other) { // {{{
			if(this == &other) return *this;
			if(mem_alloc_by_me) clear_space();
			if(other.is_view()) // for view
				memcpy(this, &other, sizeof(smat_t));
			else { // deep copy
				*this = other.get_view();
				grow_body();
			}
			return *this;
		} // }}}
#if defined(CPP11)
		// Move Constructor
		smat_t(smat_t&& m){zero_init(); *this = std::move(m);}
		// Move Assignment: steals other's arrays and resets other.
		smat_t& operator=(smat_t&& other) { // {{{
			if(this == &other) return *this;
			clear_space();
			memcpy(this, &other, sizeof(smat_t));
			other.zero_init();
			return *this;
		} // }}}
#endif
		// Destructor
		~smat_t(){ clear_space();}
		bool is_view() const {return mem_alloc_by_me==false;}
		bool is_sparse() const {return true;}
		void clear_space();
		smat_t get_view() const;
		smat_t& grow_body();
		smat_t transpose() const; // return a transpose view
		//const smat_t transpose() const; // return a transpose view
		// In-place functions
		smat_t& to_transpose(); // return a transpose view
		smat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm);
		smat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL);
		smat_subset_iterator_t<val_type> row_subset_it(const std::vector<unsigned> &subset) const;
		smat_subset_iterator_t<val_type> row_subset_it(const unsigned *subset, int subset_size) const;
		smat_subset_iterator_t<val_type> col_subset_it(const std::vector<unsigned> &subset) const;
		smat_subset_iterator_t<val_type> col_subset_it(const unsigned *subset, int subset_size) const;
		smat_t row_subset(const std::vector<unsigned> &subset) const;
		smat_t row_subset(const unsigned *subset, int subset_size) const;
		size_t nnz_of_row(unsigned i) const {return (row_ptr[i+1]-row_ptr[i]);}
		size_t nnz_of_col(unsigned i) const {return (col_ptr[i+1]-col_ptr[i]);}
		// smat-vector multiplication
		val_type* Xv(const val_type* v, val_type* Xv) const;
		dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv) const;
		val_type* XTu(const val_type* u, val_type* XTu) const;
		dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu) const;
		// IO methods
		void load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type>* entry_it);
		void load(size_t _rows, size_t _cols, size_t _nnz, const char *filename, format_t fmt);
		void load_from_PETSc(const char *filename);
		void load_from_PETSc(FILE *fp, const char *filename=NULL);
		void save_PETSc_to_file(const char *filename) const;
		void save_PETSc_to_file(FILE *fp) const;
		void load_from_binary(const char *filename);
		void save_binary_to_file(const char *filename) const ;
		// used for MPI verions
		// Take ownership of externally filled arrays and recompute max_col_nnz.
		void from_mpi(){ // {{{
			mem_alloc_by_me = true;
			max_col_nnz = 0;
			for(size_t c = 0; c < cols; c++)
				max_col_nnz = std::max(max_col_nnz, nnz_of_col(c));
		} // }}}
		val_type get_global_mean() const;
		void remove_bias(val_type bias=0);
		// Debug dump of the structural fields (not the entries).
		void print_mat(const char *str="", FILE *fp=stdout) const { // {{{
			fprintf(fp, "===>%s<===\n", str);
			// %zu fixes %lu-with-size_t mismatches on non-LP64 targets; the
			// (void*) casts match %p's expected argument type.
			fprintf(fp, "rows,cols,nnz = %zu, %zu, %zu\n", rows, cols, nnz);
			fprintf(fp, "col_ptr, row_idx, val = %p, %p, %p\n", (void*)col_ptr, (void*)row_idx, (void*)val);
			fprintf(fp, "row_ptr, col_idx, val_t = %p, %p, %p\n", (void*)row_ptr, (void*)col_idx, (void*)val_t);
			fprintf(fp, "mem_alloc_by_me = %d\n", (int)mem_alloc_by_me);
			fprintf(fp, "read_from_binary = %d\n", (int)read_from_binary);
		} // }}}
}; // }}}
// Lapack and Blas support {{{
#ifdef _WIN32
#define ddot_ ddot
#define sdot_ sdot
#define daxpy_ daxpy
#define saxpy_ saxpy
#define dcopy_ dcopy
#define scopy_ scopy
#define dgemm_ dgemm
#define sgemm_ sgemm
#define dposv_ dposv
#define sposv_ sposv
#define dgesdd_ dgesdd
#define sgesdd_ sgesdd
#endif
extern "C" {
double ddot_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
float sdot_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);
ptrdiff_t dscal_(ptrdiff_t *, double *, double *, ptrdiff_t *);
ptrdiff_t sscal_(ptrdiff_t *, float *, float *, ptrdiff_t *);
ptrdiff_t daxpy_(ptrdiff_t *, double *, double *, ptrdiff_t *, double *, ptrdiff_t *);
ptrdiff_t saxpy_(ptrdiff_t *, float *, float *, ptrdiff_t *, float *, ptrdiff_t *);
double dcopy_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
float scopy_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);
void dgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc);
void sgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc);
int dposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info);
int sposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info);
void dgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
void sgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
}
// Type-generic dispatchers over the Fortran BLAS/LAPACK symbols declared in
// the extern "C" block above: each unspecialized template is declared only,
// and the inline specializations forward to the d* (double) / s* (float)
// routine.  All arguments are pointers, per the Fortran calling convention.
// dot: <x, y>
template<typename val_type> val_type dot(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double dot(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return ddot_(len,x,xinc,y,yinc);}
template<> inline float dot(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return sdot_(len,x,xinc,y,yinc);}
// scal: x := a*x
template<typename val_type> val_type scal(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *);
template<> inline double scal(ptrdiff_t *len, double *a, double *x, ptrdiff_t *xinc) { return dscal_(len,a,x,xinc);}
template<> inline float scal(ptrdiff_t *len, float *a, float *x, ptrdiff_t *xinc) { return sscal_(len,a,x,xinc);}
// axpy: y := alpha*x + y
template<typename val_type> ptrdiff_t axpy(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline ptrdiff_t axpy(ptrdiff_t *len, double *alpha, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return daxpy_(len,alpha,x,xinc,y,yinc);};
template<> inline ptrdiff_t axpy(ptrdiff_t *len, float *alpha, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return saxpy_(len,alpha,x,xinc,y,yinc);};
// copy: y := x
template<typename val_type> val_type copy(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double copy(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return dcopy_(len,x,xinc,y,yinc);}
template<> inline float copy(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return scopy_(len,x,xinc,y,yinc);}
// gemm: C := alpha*op(A)*op(B) + beta*C
template<typename val_type> void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, val_type *alpha, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, val_type *beta, val_type *c, ptrdiff_t *ldc);
template<> inline void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc) { dgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); }
template<> inline void gemm<float>(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc) { sgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); }
// posv: solve A*X = B for symmetric positive definite A
template<typename val_type> int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, ptrdiff_t *info);
template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info) { return dposv_(uplo, n, nrhs, a, lda, b, ldb, info); }
template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info) { return sposv_(uplo, n, nrhs, a, lda, b, ldb, info); }
// gesdd: singular value decomposition (divide and conquer)
template<typename val_type> void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, val_type* a, ptrdiff_t* lda, val_type* s, val_type* u, ptrdiff_t* ldu, val_type* vt, ptrdiff_t* ldvt, val_type* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return dgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); }
template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return sgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); }
// }}}
// <x,y>
// <x,y>: inner product of two raw buffers via the BLAS dot wrapper.
template<typename val_type>
val_type do_dot_product(const val_type *x, const val_type *y, size_t size) { // {{{
	ptrdiff_t one = 1;
	ptrdiff_t n = (ptrdiff_t) size;
	// The Fortran interface takes non-const pointers; the data is not modified.
	return dot(&n, const_cast<val_type*>(x), &one, const_cast<val_type*>(y), &one);
} // }}}
// <x,y> for dense vectors; the two vectors must have the same length.
template<typename val_type>
val_type do_dot_product(const dvec_t<val_type> &x, const dvec_t<val_type> &y) { // {{{
	assert(x.size() == y.size());
	const size_t len = x.size();
	return do_dot_product(x.data(), y.data(), len);
} // }}}
// <X,Y> = sum_ij X(i,j)*Y(i,j) for dense matrices of identical shape.
template<typename val_type>
val_type do_dot_product(const dmat_t<val_type> &x, const dmat_t<val_type> &y) { // {{{
assert(x.rows == y.rows && x.cols == y.cols);
// Same storage order: the flat buffers line up element-for-element,
// so the vector dot product can be used directly.
if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor()))
return do_dot_product(x.data(), y.data(), x.rows*x.cols);
else {
// Mixed layouts: accumulate elementwise with an OpenMP reduction.
val_type ret = 0.0;
// Iterate over the longer dimension for better parallel granularity.
// Binding the conditional to a const ref extends the transpose view's lifetime.
const dmat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose();
const dmat_t<val_type> &yy = (y.rows > y.cols) ? y : y.transpose();
#pragma omp parallel for schedule(static) reduction(+:ret)
for(size_t i = 0; i < xx.rows; i++) {
double ret_local = 0.0; // per-row accumulation in double for accuracy
for(size_t j = 0; j < xx.cols; j++)
ret_local += xx.at(i,j)*yy.at(i,j);
ret += ret_local;
}
return (val_type)ret;
}
} // }}}
// y = alpha*x + y
// y += alpha*x over raw buffers (BLAS axpy, unit strides); no-op when alpha == 0.
template<typename val_type, typename T>
void do_axpy(T alpha, const val_type *x, val_type *y, size_t size) { // {{{
	if(alpha == 0) return;
	val_type a = (val_type)alpha;
	ptrdiff_t one = 1;
	ptrdiff_t n = (ptrdiff_t)size;
	axpy(&n, &a, const_cast<val_type*>(x), &one, y, &one);
} // }}}
// y += alpha*x for dense vectors.
template<typename val_type, typename T>
void do_axpy(T alpha, const dvec_t<val_type> &x, dvec_t<val_type> &y) { // {{{
	const size_t len = x.size();
	do_axpy(alpha, x.data(), y.data(), len);
} // }}}
// Y += alpha*X for dense matrices of identical shape.
template<typename val_type, typename T>
void do_axpy(T alpha, const dmat_t<val_type> &x, dmat_t<val_type> &y) { // {{{
assert(x.rows == y.rows && x.cols == y.cols);
// Same storage order: operate on the flat buffers with one BLAS axpy.
if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor()))
do_axpy(alpha, x.data(), y.data(), x.rows*x.cols);
else {
// Mixed layouts: elementwise loop, parallelized over the longer dimension.
if(x.rows > x.cols) {
#pragma omp parallel for schedule(static)
for(size_t i = 0; i < x.rows; i++)
for(size_t j = 0; j < x.cols; j++)
y.at(i,j) += alpha*x.at(i,j);
} else {
#pragma omp parallel for schedule(static)
for(size_t j = 0; j < x.cols; j++)
for(size_t i = 0; i < x.rows; i++)
y.at(i,j) += alpha*x.at(i,j);
}
}
} // }}}
// x *= alpha
// x *= alpha, in place.
template<typename val_type, typename T>
void do_scale(T alpha, val_type *x, size_t size) { // {{{
	if(alpha == 1.0) return;            // identity scale: nothing to do
	if(alpha == 0.0) {                  // exact zero: just wipe the buffer
		memset(x, 0, sizeof(val_type)*size);
		return;
	}
	// axpy with y == x computes x += (alpha-1)*x, i.e. x *= alpha.
	do_axpy((val_type)(alpha-1), x, x, size);
} // }}}
// x *= alpha for a dense vector; forwards to the raw-pointer overload.
// NOTE: the original declared a val_type return but never returned a value,
// which is undefined behavior when the function is called; the return type
// is now void, matching the raw-pointer do_scale.
template<typename val_type, typename T>
void do_scale(T alpha, dvec_t<val_type> &x) { // {{{
	do_scale(alpha, x.data(), x.size());
} // }}}
// x *= alpha for a dense matrix; scales the flat buffer in place.
// NOTE: the original declared a val_type return but never returned a value,
// which is undefined behavior when the function is called; the return type
// is now void, matching the raw-pointer do_scale.
template<typename val_type, typename T>
void do_scale(T alpha, dmat_t<val_type> &x) { // {{{
	do_scale(alpha, x.data(), x.rows*x.cols);
} // }}}
// y = x
// y = x via BLAS copy; skips the call entirely when the buffers are identical.
template<typename val_type>
void do_copy(const val_type *x, val_type *y, size_t size) { // {{{
	if(x == y) return;
	ptrdiff_t one = 1;
	ptrdiff_t n = (ptrdiff_t) size;
	copy(&n, const_cast<val_type*>(x), &one, y, &one);
} // }}}
// A, B, C are stored in column major!
// C = alpha*op(A)*op(B) + beta*C via BLAS gemm; A, B, C are column-major,
// C is m x n and k is the inner dimension.
// FIX: the transpose-flag pointers were encoding-corrupted ("&not;ranspose"
// HTML-entity mangling) — restored to take the address of `notranspose`.
template<typename val_type, typename T1, typename T2>
void dmat_x_dmat_colmajor(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { // {{{
	ptrdiff_t mm = (ptrdiff_t)m, nn = (ptrdiff_t)n, kk = (ptrdiff_t)k;
	// Leading dimensions depend on whether each operand is transposed.
	ptrdiff_t lda = trans_A? kk:mm, ldb = trans_B? nn:kk, ldc = mm;
	char transpose = 'T', notranspose = 'N';
	char *transa = trans_A? &transpose: &notranspose;
	char *transb = trans_B? &transpose: &notranspose;
	val_type alpha_ = (val_type) alpha;
	val_type beta_ = (val_type) beta;
	// gemm takes non-const pointers; A and B are not modified.
	val_type *AA = const_cast<val_type*>(A);
	val_type *BB = const_cast<val_type*>(B);
	gemm(transa, transb, &mm, &nn, &kk, &alpha_, AA, &lda, BB, &ldb, &beta_, C, &ldc);
} // }}}
// C = alpha*A*B + beta*C
// C : m * n, k is the dimension of the middle
// A, B, C are stored in row major!
// C = alpha*op(A)*op(B) + beta*C for row-major buffers.
// A row-major m x n C is the same memory as a column-major n x m B^T A^T,
// so swap the operands and dimensions and reuse the column-major kernel.
template<typename val_type, typename T1, typename T2>
void dmat_x_dmat(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { // {{{
	dmat_x_dmat_colmajor(alpha, B, trans_B, A, trans_A, beta, C, n, m, k);
} //}}}
// C = A'*B
// C : m*n, k is the dimension of the middle
// A, B, C are stored in row major!
// C = A^T * B for row-major buffers; C is m x n, k is the inner dimension.
template<typename val_type>
void dmat_trans_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { // {{{
	dmat_x_dmat(val_type(1.0), A, /*trans_A=*/true, B, /*trans_B=*/false, val_type(0.0), C, m, n, k);
} // }}}
// C=A*B
// A, B, C are stored in row major!
// C = A * B for row-major buffers; C is m x n, k is the inner dimension.
template<typename val_type>
void dmat_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { // {{{
	dmat_x_dmat(val_type(1.0), A, /*trans_A=*/false, B, /*trans_B=*/false, val_type(0.0), C, m, n, k);
} // }}}
// Input: an n*k row-major matrix H
// Output: an k*k matrix H^TH
// Given an n x k row-major H, compute the k x k Gram matrix HTH = H^T H.
template<typename val_type>
void doHTH(const val_type *H, val_type *HTH, size_t n, size_t k) { // {{{
	// Row-major H re-read as column-major is H^T, so (no-trans, trans) yields H^T H.
	dmat_x_dmat_colmajor(val_type(1.0), H, false, H, true, val_type(0.0), HTH, k, k, n);
} // }}}
// Solve Ax = b, A is symmetric positive definite, b is overwritten with the result x
// A will be modifed by internal Lapack. Make copy when necessary
// Solve A x = b for symmetric positive-definite A via Cholesky (LAPACK posv).
// b is overwritten with the solution; A is overwritten by the factorization.
// Returns true on success (info == 0).
template<typename val_type>
bool ls_solve_chol(val_type *A, val_type *b, size_t n) { // {{{
	char uplo = 'U';
	ptrdiff_t dim = (ptrdiff_t)n, nrhs = 1, info = 0;
	ptrdiff_t lda = dim, ldb = dim;
	posv(&uplo, &dim, &nrhs, A, &lda, b, &ldb, &info);
	return info == 0;
} // }}}
// Solve AX = B, A is symmetric positive definite, B is overwritten with the result X
// A is a m-by-m matrix, while B is a m-by-n matrix stored in col_major
// A will be modifed by internal Lapack. Make copy when necessary
// Solve A X = B for symmetric positive-definite m x m A; B is m x n,
// column-major, and is overwritten with X. A is overwritten by the
// factorization. Returns true on success.
template<typename val_type>
bool ls_solve_chol_matrix_colmajor(val_type *A, val_type *B, size_t m, size_t n = size_t(0)) { // {{{
	char uplo = 'U';
	ptrdiff_t dim = (ptrdiff_t)m, nrhs = (ptrdiff_t)n, info = 0;
	ptrdiff_t lda = dim, ldb = dim;
	posv(&uplo, &dim, &nrhs, A, &lda, B, &ldb, &info);
	return info == 0;
} // }}}
// Functions for dmat_t type
// C = alpha*A*B + beta*C
// C : m * n, k is the dimension of the middle
// C = alpha*A*B + beta*C for dmat_t operands; C is resized to A.rows x B.cols.
template<typename val_type, typename T1, typename T2>
dmat_t<val_type>& dmat_x_dmat(T1 alpha, const dmat_t<val_type>& A, const dmat_t<val_type>& B, T2 beta, dmat_t<val_type>& C) { // {{{
assert(A.cols == B.rows);
dmat_t<val_type> AA = A.get_view(), BB = B.get_view();
C.lazy_resize(AA.rows, BB.cols);
// Pick the kernel matching C's layout; an operand stored in the opposite
// layout is handled by passing it as transposed to gemm.
if (C.is_rowmajor()) {
bool trans_A = A.is_rowmajor()? false : true;
bool trans_B = B.is_rowmajor()? false : true;
dmat_x_dmat(alpha, AA.data(), trans_A, BB.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols);
} else {
bool trans_A = A.is_colmajor()? false : true;
bool trans_B = B.is_colmajor()? false : true;
dmat_x_dmat_colmajor(alpha, AA.data(), trans_A, BB.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols);
}
return C;
} // }}}
// C=A*B
// C = A*B for dmat_t operands (alpha = 1, beta = 0).
template<typename val_type>
dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type>& A, const dmat_t<val_type>& B, dmat_t<val_type>& C) { // {{{
	const val_type one(1.0), zero(0.0);
	return dmat_x_dmat(one, A, B, zero, C);
} // }}}
// Matrix product as an expression: returns A*B in a freshly allocated matrix.
template<typename val_type>
dmat_t<val_type> operator*(const dmat_t<val_type>& A, const dmat_t<val_type>& B) { // {{{
	dmat_t<val_type> product(A.rows, B.cols);
	dmat_x_dmat(A, B, product);
	return product;
} // }}}
// Solve AX = B, A is symmetric positive definite, return X
// Solve A X = B for symmetric positive-definite A and return X.
// Works on owned copies so neither input is modified.
// FIX: the error message misspelled the callee ("ls_solve_cho_") and
// lacked a trailing newline.
template<typename val_type>
dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B) { // {{{
	dmat_t<val_type> X(B, COLMAJOR); X.grow_body(); // column-major copy, as posv requires
	dmat_t<val_type> AA(A); AA.grow_body();         // posv overwrites A with its factorization
	if(ls_solve_chol_matrix_colmajor(AA.data(), X.data(), AA.rows, X.cols) == false)
		fprintf(stderr, "error to apply ls_solve_chol_matrix_colmajor\n");
	return X;
} // }}}
// SVD [U S V] = SVD(A),
// Wrapper around LAPACK ?gesdd computing [U, S, V] = svd(A).
// `reduced` selects the thin SVD (jobz='S') vs the full SVD (jobz='A').
template<typename val_type>
class svd_solver_t { // {{{
private:
char jobz;
ptrdiff_t mm, nn, min_mn, max_mn, lda, ldu, ldvt, lwork1, lwork2, lwork, info;
std::vector<val_type> u_buf, v_buf, s_buf, work;
std::vector<ptrdiff_t> iwork;
size_t k; // min(rows, cols): number of singular values
// Size the LAPACK workspaces and the output matrices for a column-major A.
void prepare_parameter(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced) { // {{{
k = std::min(A.rows, A.cols);
mm = (ptrdiff_t)A.rows;
nn = (ptrdiff_t)A.cols;
min_mn = std::min(mm,nn);
max_mn = std::max(mm,nn);
lda = mm;
ldu = mm;
ldvt = reduced? min_mn : nn;
// Workspace bounds from the gesdd documentation for jobz='S' and jobz='A'.
lwork1 = 3*min_mn*min_mn + std::max(max_mn, 4*min_mn*min_mn + 4*min_mn);
lwork2 = 3*min_mn + std::max(max_mn, 4*min_mn*min_mn + 3*min_mn + max_mn);
lwork = 2 * std::max(lwork1, lwork2); // due to differences between lapack 3.1 and 3.4
info = 0;
work.resize(lwork);
iwork.resize((size_t)(8*min_mn));
if(!S.is_view() || S.size() != k)
S.resize(k);
if(reduced) {
jobz = 'S';
// U column-major and V row-major: gesdd writes V^T column-major,
// which is exactly V stored row-major.
U.lazy_resize(A.rows, k, COLMAJOR);
V.lazy_resize(A.cols, k, ROWMAJOR);
} else {
jobz = 'A';
U.lazy_resize(A.rows, A.rows, COLMAJOR);
V.lazy_resize(A.cols, A.cols, ROWMAJOR);
}
} // }}}
public:
svd_solver_t() {}
// Compute the SVD of A; returns true when gesdd reports success (info == 0).
bool solve(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true) { // {{{
// gesdd needs column-major input; a row-major A is handled by solving
// the transpose (svd(A^T) swaps the roles of U and V).
if(A.is_rowmajor())
return solve(A.transpose(), V, S, U, reduced);
else {
dmat_t<val_type> AA(A.get_view());
prepare_parameter(AA, U, S, V, reduced);
#if defined(CPP11)
gesdd(&jobz, &mm, &nn, AA.data(), &lda,
S.data(), U.data(), &ldu, V.data(), &ldvt, work.data(), &lwork, iwork.data(), &info);
#else
gesdd(&jobz, &mm, &nn, AA.data(), &lda,
S.data(), U.data(), &ldu, V.data(), &ldvt, &work[0], &lwork, &iwork[0], &info);
#endif
return (info == 0);
}
} // }}}
}; // }}}
template<typename val_type>
void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true) { // {{{
svd_solver_t<val_type> solver;
solver.solve(A, U, S, V, reduced);
} // }}}
// Random m x n sparse matrix with uniform entries at the given density.
template<typename val_type>
smat_t<val_type> sprand(size_t m, size_t n, double sparsity) { // {{{
	// One shared generator so successive calls keep advancing the stream.
	static rng_t rng;
	return smat_t<val_type>::rand(rng, m, n, sparsity);
} // }}}
// Random m x n sparse matrix with normal entries at the given density.
template<typename val_type>
smat_t<val_type> sprandn(size_t m, size_t n, double sparsity) { // {{{
	// One shared generator so successive calls keep advancing the stream.
	static rng_t rng;
	return smat_t<val_type>::randn(rng, m, n, sparsity);
} // }}}
// Random m x n dense matrix with entries drawn uniformly from [0, 1).
template<typename val_type>
dmat_t<val_type> drand(size_t m, size_t n, major_t major_type_=default_major) { // {{{
	// One shared generator so successive calls keep advancing the stream.
	static rng_t rng;
	return dmat_t<val_type>::rand(rng, m, n, 0.0, 1.0, major_type_);
} // }}}
// Random m x n dense matrix with entries drawn from N(0, 1).
template<typename val_type>
dmat_t<val_type> drandn(size_t m, size_t n, major_t major_type_=default_major) { // {{{
	// One shared generator so successive calls keep advancing the stream.
	static rng_t rng;
	return dmat_t<val_type>::randn(rng, m, n, 0.0, 1.0, major_type_);
} // }}}
/*-------------- Iterators -------------------*/
// A single (row, col, value, weight) record produced by entry iterators.
template<typename val_type>
class entry_t { // {{{
public:
	unsigned i, j;     // zero-based row and column indices
	val_type v;        // entry value
	val_type weight;   // per-entry weight, defaults to 1.0
	entry_t(int ii=0, int jj=0, val_type vv=0, val_type ww=1.0)
		: i(ii), j(jj), v(vv), weight(ww) {}
}; // }}}
// Abstract interface for streaming (i, j, v) entries into a matrix loader.
// `nnz` holds the number of entries still to be produced by next().
template<typename val_type>
class entry_iterator_t { // {{{
public:
size_t nnz;
// Return the next entry and advance; concrete iterators decrement nnz.
virtual entry_t<val_type> next() = 0;
}; // }}}
#define MAXLINE 10240
// Iterator for files with (i,j,v) tuples
// Entry iterator over a text file of one-based "i j v" tuples, one per line.
template<typename val_type>
class file_iterator_t: public entry_iterator_t<val_type> { // {{{
public:
file_iterator_t(size_t nnz_, const char* filename, size_t start_pos=0);
~file_iterator_t(){ if (fp) fclose(fp); }
entry_t<val_type> next();
private:
size_t nnz;  // entries remaining; shadows the base-class member
FILE *fp;    // NULL when the file could not be opened
char line[MAXLINE]; // scratch buffer for one input line
}; // }}}
// smat_t iterator
// Entry iterator over an existing smat_t, in row-major or column-major order.
// For COLMAJOR the CSC arrays play the CSR roles, so entries come out
// column by column with i/j swapped accordingly by next().
template<typename val_type>
class smat_iterator_t: public entry_iterator_t<val_type> { // {{{
public:
//enum {ROWMAJOR, COLMAJOR};
// major: smat_iterator_t<val_type>::ROWMAJOR or smat_iterator_t<val_type>::COLMAJOR
smat_iterator_t(const smat_t<val_type>& M, major_t major = ROWMAJOR);
~smat_iterator_t() {}
entry_t<val_type> next();
private:
size_t nnz;        // entries remaining; shadows the base-class member
unsigned *col_idx; // borrowed pointers into M's storage (no ownership)
size_t *row_ptr;
val_type *val_t;
size_t rows, cols, cur_idx;
size_t cur_row;
}; // }}}
// smat_t subset iterator
// Entry iterator over a row (or column) subset of an smat_t.
// With remapping enabled the selected rows/cols are renumbered 0..size-1
// (in sorted subset order); otherwise original indices are emitted.
template<typename val_type>
class smat_subset_iterator_t: public entry_iterator_t<val_type> { // {{{
public:
//enum {ROWMAJOR, COLMAJOR};
// major: smat_iterator_t<val_type>::ROWMAJOR or smat_iterator_t<val_type>::COLMAJOR
smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset, size_t size, bool remapping=false, major_t major = ROWMAJOR);
~smat_subset_iterator_t() {}
size_t get_nnz() {return nnz;}
// Effective dimensions after optional remapping.
size_t get_rows() {return major==ROWMAJOR? remapping? subset.size(): rows: rows;}
size_t get_cols() {return major==ROWMAJOR? cols: remapping? subset.size():cols;}
entry_t<val_type> next();
private:
size_t nnz;        // entries remaining; shadows the base-class member
unsigned *col_idx; // borrowed pointers into M's storage (no ownership)
size_t *row_ptr;
val_type *val_t;
size_t rows, cols, cur_idx;
size_t cur_row;    // index into `subset`, not a matrix row number
std::vector<unsigned>subset; // sorted copy of the requested indices
major_t major;
bool remapping;
}; // }}}
// dmat_t iterator
// Entry iterator over a dense matrix, emitting only entries whose magnitude
// exceeds `threshold`, in row-major scan order.
template<typename val_type>
class dmat_iterator_t: public entry_iterator_t<val_type> { // {{{
public:
// Counts the qualifying entries up front and positions (cur_row, cur_col)
// on the first of them.
dmat_iterator_t(const dmat_t<val_type>& M, double threshold=1e-12) : M(M), nnz(M.rows*M.cols), rows(M.rows), cols(M.cols), threshold(fabs(threshold)) { // {{{
cur_row = 0;
cur_col = 0;
nnz = 0;
bool find_firstnz = true;
for(size_t i = 0; i < rows; i++)
for(size_t j = 0; j < cols; j++)
// NOTE(review): counts entries with fabs >= threshold, but next() skips
// entries with fabs <= threshold — values exactly equal to threshold are
// counted here yet never emitted. Confirm the intended boundary.
if(fabs((double)M.at(i,j)) >= threshold) {
if(find_firstnz) {
cur_row = i;
cur_col = j;
find_firstnz = false;
}
nnz++ ;
}
// printf("cur_row %ld cur_col %ld nnz %ld\n", cur_row, cur_col, nnz);
} // }}}
~dmat_iterator_t() {}
// Return the current entry and advance to the next qualifying one.
// NOTE(review): after the last qualifying entry has been returned, the
// do/while scan can read past the final element of M — callers appear
// expected to call next() exactly get_nnz() times; verify.
entry_t<val_type> next() { // {{{
entry_t<val_type> entry(cur_row, cur_col, M.at(cur_row, cur_col));
do {
cur_col += 1;
if(cur_col == cols) {
cur_row += 1;
cur_col = 0;
}
} while(fabs((double)M.at(cur_row, cur_col)) <= threshold );
return entry;
} // }}}
size_t get_nnz() const {return nnz;}
private:
size_t nnz; // count of qualifying entries; shadows the base-class member
const dmat_t<val_type>& M;
size_t rows, cols, cur_row, cur_col;
double threshold;
}; // }}}
// -------------- Implementation --------------
// Reset every member to an empty, non-owning state.
template<typename val_type>
inline void smat_t<val_type>::zero_init() { // {{{
	mem_alloc_by_me = false;
	read_from_binary = false;
	val = NULL; val_t = NULL;
	row_ptr = NULL; col_ptr = NULL;
	row_idx = NULL; col_idx = NULL;
	rows = 0; cols = 0; nnz = 0;
	max_row_nnz = 0; max_col_nnz = 0;
} // }}}
// Allocate both CSR and CSC storage for a rows_ x cols_ matrix with nnz_ entries.
template<typename val_type>
void smat_t<val_type>::allocate_space(size_t rows_, size_t cols_, size_t nnz_) { // {{{
if(mem_alloc_by_me)
clear_space(); // release previously owned buffers first
rows = rows_; cols = cols_; nnz = nnz_;
val = MALLOC(val_type, nnz); val_t = MALLOC(val_type, nnz);
row_idx = MALLOC(unsigned, nnz); col_idx = MALLOC(unsigned, nnz);
row_ptr = MALLOC(size_t, rows+1); col_ptr = MALLOC(size_t, cols+1);
// Only the ptr arrays need zeroing; idx/val arrays are filled by the loaders.
memset(row_ptr,0,sizeof(size_t)*(rows+1));
memset(col_ptr,0,sizeof(size_t)*(cols+1));
mem_alloc_by_me = true;
} // }}}
// Release any owned storage and reset to the empty state.
// Cleanup: dropped the redundant `if(ptr) free(ptr)` guards — free(NULL)
// is a defined no-op.
template<typename val_type>
void smat_t<val_type>::clear_space() { // {{{
	if(mem_alloc_by_me) {
		if(read_from_binary) {
			// In the binary-load case all arrays live inside one buffer.
			free(binary_buf);
		} else {
			free(val); free(val_t);
			free(row_ptr); free(row_idx);
			free(col_ptr); free(col_idx);
		}
	}
	zero_init(); // also nulls every pointer, preventing double frees
} // }}}
// Return a non-owning shallow copy (a "view") sharing this matrix's buffers.
template<typename val_type>
smat_t<val_type> smat_t<val_type>::get_view() const { // {{{
if(is_view())
return *this;
else {
smat_t tmp;
// NOTE(review): bitwise copy assumes smat_t is trivially copyable (raw
// pointers and scalars only) — confirm if non-POD members are ever added.
memcpy(&tmp, this, sizeof(smat_t));
tmp.mem_alloc_by_me = false; // the view must never free the shared buffers
return tmp;
}
} // }}}
// Convert a view into an owning matrix by deep-copying all shared buffers.
// FIX: `sizeof(size_t)*cols+1` parsed as `(sizeof(size_t)*cols)+1`, so the
// last element of col_ptr (and row_ptr) was never copied; parenthesized to
// copy the full (cols+1)/(rows+1) arrays.
template<typename val_type>
smat_t<val_type>& smat_t<val_type>::grow_body() { // {{{
	if(is_view()) {
		smat_t tmp = *this; // shallow copy still pointing at the viewed buffers
		col_ptr = MALLOC(size_t, cols+1); memcpy(col_ptr, tmp.col_ptr, sizeof(size_t)*(cols+1));
		row_idx = MALLOC(unsigned, nnz); memcpy(row_idx, tmp.row_idx, sizeof(unsigned)*nnz);
		val = MALLOC(val_type, nnz); memcpy(val, tmp.val, sizeof(val_type)*nnz);
		row_ptr = MALLOC(size_t, rows+1); memcpy(row_ptr, tmp.row_ptr, sizeof(size_t)*(rows+1));
		col_idx = MALLOC(unsigned, nnz); memcpy(col_idx, tmp.col_idx, sizeof(unsigned)*nnz);
		val_t = MALLOC(val_type, nnz); memcpy(val_t, tmp.val_t, sizeof(val_type)*nnz);
		mem_alloc_by_me = true;
	}
	return *this;
} // }}}
// Return a transposed, non-owning view of this matrix (no data is moved;
// the CSR/CSC roles are simply swapped in the view).
template<typename val_type>
smat_t<val_type> smat_t<val_type>::transpose() const { // {{{
	smat_t<val_type> view = get_view();
	return view.to_transpose();
} // }}}
// Transpose in place: a CSR/CSC pair for A is a CSC/CSR pair for A^T,
// so swapping the paired members suffices.
template<typename val_type>
smat_t<val_type>& smat_t<val_type>::to_transpose() { // {{{
	std::swap(rows, cols);
	std::swap(val, val_t);
	std::swap(row_ptr, col_ptr);
	std::swap(row_idx, col_idx);
	std::swap(max_row_nnz, max_col_nnz);
	return *this;
} // }}}
// Permute rows/columns given std::vector permutations; a vector whose size
// does not match the corresponding dimension is treated as "no permutation".
// FIX: the function is declared to return smat_t& but had no return
// statement (undefined behavior); now returns *this.
template<typename val_type>
smat_t<val_type>& smat_t<val_type>::apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { // {{{
	apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0]: NULL);
	return *this;
} // }}}
// Permute rows and/or columns in place; a NULL pointer means "leave as-is".
// After relabeling the indices, a CSC<->CSR round-trip re-sorts the storage.
// FIX: the function is declared to return smat_t& but had no return
// statement (undefined behavior); now returns *this.
template<typename val_type>
smat_t<val_type>& smat_t<val_type>::apply_permutation(const unsigned *row_perm, const unsigned *col_perm) { // {{{
	if(row_perm!=NULL) {
		for(size_t idx = 0; idx < nnz; idx++) row_idx[idx] = row_perm[row_idx[idx]];
		csc_to_csr();
		csr_to_csc();
	}
	if(col_perm!=NULL) {
		for(size_t idx = 0; idx < nnz; idx++) col_idx[idx] = col_perm[col_idx[idx]];
		csr_to_csc();
		csc_to_csr();
	}
	return *this;
} // }}}
// Iterator over the given subset of rows (vector convenience overload).
template<typename val_type>
smat_subset_iterator_t<val_type> smat_t<val_type>::row_subset_it(const std::vector<unsigned> &subset) const { // {{{
	const int count = (int)subset.size();
	return row_subset_it(&subset[0], count);
} // }}}
// Iterator over the given subset of rows.
template<typename val_type>
smat_subset_iterator_t<val_type> smat_t<val_type>::row_subset_it(const unsigned *subset, int subset_size) const { // {{{
	return smat_subset_iterator_t<val_type>(*this, subset, subset_size);
} // }}}
// Iterator over the given subset of columns (vector convenience overload).
template<typename val_type>
smat_subset_iterator_t<val_type> smat_t<val_type>::col_subset_it(const std::vector<unsigned> &subset) const { // {{{
	const int count = (int)subset.size();
	return col_subset_it(&subset[0], count);
} // }}}
// Iterator over the given subset of columns (no index remapping).
// FIX: the original qualified COLMAJOR as
// smat_subset_iterator_t<val_type>::COLMAJOR, but that in-class enum is
// commented out, so the name does not exist there and the template fails to
// compile when instantiated; use the global major_t enumerator (as the
// constructor's default already does). Also fixed the "remmapping" typo.
template<typename val_type>
smat_subset_iterator_t<val_type> smat_t<val_type>::col_subset_it(const unsigned *subset, int subset_size) const { // {{{
	bool remapping = false; // no remapping by default
	return smat_subset_iterator_t<val_type>(*this, subset, subset_size, remapping, COLMAJOR);
} // }}}
// Materialize the selected rows as a new matrix (vector convenience overload).
template<typename val_type>
smat_t<val_type> smat_t<val_type>::row_subset(const std::vector<unsigned> &subset) const { // {{{
	const int count = (int)subset.size();
	return row_subset(&subset[0], count);
} // }}}
// Materialize the selected rows as a new subset_size x cols matrix.
template<typename val_type>
smat_t<val_type> smat_t<val_type>::row_subset(const unsigned *subset, int subset_size) const { // {{{
	smat_subset_iterator_t<val_type> it(*this, subset, subset_size);
	smat_t<val_type> result;
	result.load_from_iterator(subset_size, cols, it.get_nnz(), &it);
	return result;
} // }}}
// Mean of all stored (nonzero) values.
// FIX: guarded the empty-matrix case, which previously divided by zero.
template<typename val_type>
val_type smat_t<val_type>::get_global_mean() const { // {{{
	if(nnz == 0) return val_type(0); // empty matrix: avoid 0/0
	val_type sum = 0;
	for(size_t idx = 0; idx < nnz; idx++) sum += val[idx];
	return sum/(val_type)nnz;
} // }}}
// Subtract a constant bias from every stored value (both CSC and CSR copies).
template<typename val_type>
void smat_t<val_type>::remove_bias(val_type bias) { // {{{
	if(!bias) return; // zero bias: nothing to change
	for(size_t idx = 0; idx < nnz; idx++) {
		val[idx] -= bias;
		val_t[idx] -= bias;
	}
} // }}}
// Sparse matrix-vector product Xv = X*v using the CSR (row-wise) arrays.
// Returns the output buffer for chaining.
template<typename val_type>
val_type* smat_t<val_type>::Xv(const val_type *v, val_type *Xv) const { // {{{
	for(size_t r = 0; r < rows; ++r) {
		Xv[r] = 0;
		for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; ++idx)
			Xv[r] += val_t[idx] * v[col_idx[idx]];
	}
	return Xv;
} // }}}
// Dense-vector overload of Xv; returns the output vector for chaining.
template<typename val_type>
dvec_t<val_type>& smat_t<val_type>::Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv) const { // {{{
	(void) this->Xv(v.data(), Xv.data());
	return Xv;
} // }}}
// Transposed product XTu = X^T * u using the CSC (column-wise) arrays.
// Returns the output buffer for chaining.
template<typename val_type>
val_type* smat_t<val_type>::XTu(const val_type *u, val_type *XTu) const { // {{{
	for(size_t c = 0; c < cols; ++c) {
		XTu[c] = 0;
		for(size_t idx = col_ptr[c]; idx < col_ptr[c+1]; ++idx)
			XTu[c] += val[idx] * u[row_idx[idx]];
	}
	return XTu;
} // }}}
// Dense-vector overload of XTu; returns the output vector for chaining.
template<typename val_type>
dvec_t<val_type>& smat_t<val_type>::XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu) const { // {{{
	(void) this->XTu(u.data(), XTu.data());
	return XTu;
} // }}}
// Comparator for sorting rates into row/column comopression storage
// Orders entry indices into (row, col) lexicographic order for CSR, or
// (col, row) order for CSC, by swapping which index array is primary.
template<typename val_type>
class SparseComp { // {{{
public:
	const unsigned *row_idx; // primary sort key
	const unsigned *col_idx; // secondary sort key
	SparseComp(const unsigned *row_idx_, const unsigned *col_idx_, bool isCSR=true) {
		if(isCSR) { row_idx = row_idx_; col_idx = col_idx_; }
		else      { row_idx = col_idx_; col_idx = row_idx_; }
	}
	bool operator()(size_t x, size_t y) const {
		if(row_idx[x] != row_idx[y]) return row_idx[x] < row_idx[y];
		return col_idx[x] < col_idx[y];
	}
}; // }}}
// Build both the CSR and CSC representations from a stream of (i, j, v)
// entries. The already-allocated idx/val arrays double as scratch space
// while counting and sorting, so no extra index buffers are needed.
template<typename val_type>
void smat_t<val_type>::load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type> *entry_it) { // {{{
clear_space(); // clear any pre-allocated space in case of memory leak
rows =_rows,cols=_cols,nnz=_nnz;
allocate_space(rows,cols,nnz);
// a trick here to utilize the space the have been allocated
std::vector<size_t> perm(_nnz);
unsigned *tmp_row_idx = col_idx;
unsigned *tmp_col_idx = row_idx;
val_type *tmp_val = val;
// First pass: record every entry and histogram the per-row/per-col counts
// into the (shifted by one) ptr arrays.
for(size_t idx = 0; idx < _nnz; idx++){
entry_t<val_type> rate = entry_it->next();
row_ptr[rate.i+1]++;
col_ptr[rate.j+1]++;
tmp_row_idx[idx] = rate.i;
tmp_col_idx[idx] = rate.j;
tmp_val[idx] = rate.v;
perm[idx] = idx;
}
// sort entries into row-majored ordering
sort(perm.begin(), perm.end(), SparseComp<val_type>(tmp_row_idx, tmp_col_idx, true));
// Generate CSR format
for(size_t idx = 0; idx < _nnz; idx++) {
val_t[idx] = tmp_val[perm[idx]];
col_idx[idx] = tmp_col_idx[perm[idx]];
}
// Calculate nnz for each row and col
// (prefix-sum the histograms into proper ptr arrays, tracking the maxima)
max_row_nnz = max_col_nnz = 0;
for(size_t r = 1; r <= rows; r++) {
max_row_nnz = std::max(max_row_nnz, row_ptr[r]);
row_ptr[r] += row_ptr[r-1];
}
for(size_t c = 1; c <= cols; c++) {
max_col_nnz = std::max(max_col_nnz, col_ptr[c]);
col_ptr[c] += col_ptr[c-1];
}
// Transpose CSR into CSC matrix
// (col_ptr entries are advanced as insertion cursors, then shifted back)
for(size_t r = 0; r < rows; ++r){
for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; idx++){
size_t c = (size_t) col_idx[idx];
row_idx[col_ptr[c]] = r;
val[col_ptr[c]++] = val_t[idx];
}
}
for(size_t c = cols; c > 0; --c) col_ptr[c] = col_ptr[c-1];
col_ptr[0] = 0;
} // }}}
// Load a sparse matrix from `filename` in the given format:
// TXT ("i j v" triplet lines) or the PETSc binary format.
template<typename val_type>
void smat_t<val_type>::load(size_t _rows, size_t _cols, size_t _nnz, const char* filename, typename smat_t<val_type>::format_t fmt) { // {{{
	if(fmt == smat_t<val_type>::PETSc) {
		load_from_PETSc(filename);
	} else if(fmt == smat_t<val_type>::TXT) {
		file_iterator_t<val_type> entry_it(_nnz, filename);
		load_from_iterator(_rows, _cols, _nnz, &entry_it);
	} else {
		fprintf(stderr, "Error: filetype %d not supported\n", fmt);
	}
} // }}}
// Write this matrix to `filename` in PETSc binary format.
// FIX: the opened FILE* was never closed, leaking the handle and leaving
// buffered output unflushed; fclose added.
template<typename val_type>
void smat_t<val_type>::save_PETSc_to_file(const char *filename) const { // {{{
	FILE *fp = fopen(filename, "wb");
	if(fp == NULL) {
		fprintf(stderr,"Error: can't open file %s\n", filename);
		exit(1);
	}
	save_PETSc_to_file(fp);
	fclose(fp);
} // }}}
// Serialize the matrix to an open stream in PETSc binary format:
// header (magic, rows, cols), 64-bit nnz, per-row counts, column indices,
// then all values converted to double in fixed-size chunks.
// NOTE(review): fwrite return values are not checked — short writes go
// undetected; consider verifying if this path matters.
template<typename val_type>
void smat_t<val_type>::save_PETSc_to_file(FILE *fp) const { // {{{
const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; // UNSIGNED_FILE unused here; kept for symmetry with the loader
int32_t int_buf[3] = {(int32_t)LONG_FILE, (int32_t)rows, (int32_t)cols};
std::vector<int32_t> nnz_row(rows);
for(size_t r = 0; r < rows; r++)
nnz_row[r] = (int)nnz_of_row(r);
fwrite(&int_buf[0], sizeof(int32_t), 3, fp);
fwrite(&nnz, sizeof(size_t), 1, fp);
fwrite(&nnz_row[0], sizeof(int32_t), rows, fp);
fwrite(&col_idx[0], sizeof(unsigned), nnz, fp);
// the following part == fwrite(val_t, sizeof(double), nnz, fp);
// (values are staged through a small double buffer so val_type can be float)
const size_t chunksize = 1024;
double buf[chunksize];
size_t idx = 0;
while(idx + chunksize < nnz) {
for(size_t i = 0; i < chunksize; i++)
buf[i] = (double) val_t[idx+i];
fwrite(&buf[0], sizeof(double), chunksize, fp);
idx += chunksize;
}
// Flush the final partial chunk.
size_t remaining = nnz - idx;
for(size_t i = 0; i < remaining; i++)
buf[i] = (double) val_t[idx+i];
fwrite(&buf[0], sizeof(double), remaining, fp);
} // }}}
// Open `filename` and load a PETSc binary matrix from it; silently returns
// (with an error message) when the file cannot be opened.
template<typename val_type>
void smat_t<val_type>::load_from_PETSc(const char *filename) { // {{{
	FILE *fp = fopen(filename, "rb");
	if(fp != NULL) {
		load_from_PETSc(fp, filename);
		fclose(fp);
	} else {
		fprintf(stderr, "Error: can't read the file (%s)!!\n", filename);
	}
} // }}}
// Deserialize a PETSc binary matrix from an open stream: header, nnz
// (32- or 64-bit depending on the magic), per-row counts, column indices,
// then double values. `filename` is only used for error messages.
// NOTE(review): fread results feed `headersize`, which is accumulated but
// never used — short reads are effectively unchecked.
template<typename val_type>
void smat_t<val_type>::load_from_PETSc(FILE *fp, const char *filename) { // {{{
clear_space(); // clear any pre-allocated space in case of memory leak
const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015;
int32_t int_buf[3];
size_t headersize = 0;
headersize += sizeof(int)*fread(int_buf, sizeof(int), 3, fp);
int filetype = int_buf[0];
rows = (size_t) int_buf[1];
cols = (size_t) int_buf[2];
// The magic number selects the width of the stored nnz field.
if(filetype == UNSIGNED_FILE) {
headersize += sizeof(int)*fread(int_buf, sizeof(int32_t), 1, fp);
nnz = (size_t) int_buf[0];
} else if (filetype == LONG_FILE){
headersize += sizeof(size_t)*fread(&nnz, sizeof(int64_t), 1, fp);
} else {
fprintf(stderr, "Error: wrong PETSc format in %s.\n", filename);
}
allocate_space(rows,cols,nnz);
// load CSR from the binary PETSc format
{ // {{{
// read row_ptr
// (the file stores per-row counts; prefix-sum them into row_ptr)
std::vector<int32_t> nnz_row(rows);
headersize += sizeof(int32_t)*fread(&nnz_row[0], sizeof(int32_t), rows, fp);
row_ptr[0] = 0;
for(size_t r = 1; r <= rows; r++)
row_ptr[r] = row_ptr[r-1] + nnz_row[r-1];
// read col_idx
headersize += sizeof(int)*fread(&col_idx[0], sizeof(unsigned), nnz, fp);
// read val_t
// (values are stored as double; convert chunk-by-chunk into val_type)
const size_t chunksize = 1024;
double buf[chunksize];
size_t idx = 0;
while(idx + chunksize < nnz) {
headersize += sizeof(double)*fread(&buf[0], sizeof(double), chunksize, fp);
for(size_t i = 0; i < chunksize; i++)
val_t[idx+i] = (val_type) buf[i];
idx += chunksize;
}
size_t remaining = nnz - idx;
headersize += sizeof(double)*fread(&buf[0], sizeof(double), remaining, fp);
for(size_t i = 0; i < remaining; i++)
val_t[idx+i] = (val_type) buf[i];
} // }}}
// Derive the CSC half and the per-row/col maxima from the loaded CSR data.
csr_to_csc();
update_max_nnz();
} // }}}
// Rebuild the CSC arrays (col_ptr/row_idx/val) from the CSR arrays
// (row_ptr/col_idx/val_t) via a counting-sort style transpose.
template<typename val_type>
void smat_t<val_type>::csr_to_csc() { // {{{
// Histogram the column of every entry into col_ptr (shifted by one).
memset(col_ptr, 0, sizeof(size_t)*(cols+1));
for(size_t idx = 0; idx < nnz; idx++)
col_ptr[col_idx[idx]+1]++;
// Prefix-sum into starting offsets per column.
for(size_t c = 1; c <= cols; c++)
col_ptr[c] += col_ptr[c-1];
// Scatter entries; col_ptr[c] doubles as the insertion cursor for column c.
for(size_t r = 0; r < rows; r++) {
for(size_t idx = row_ptr[r]; idx != row_ptr[r+1]; idx++) {
size_t c = (size_t) col_idx[idx];
row_idx[col_ptr[c]] = r;
val[col_ptr[c]++] = val_t[idx];
}
}
// The cursors ended one column ahead; shift back to restore col_ptr.
for(size_t c = cols; c > 0; c--)
col_ptr[c] = col_ptr[c-1];
col_ptr[0] = 0;
} // }}}
// Rebuild the CSR arrays (row_ptr/col_idx/val_t) from the CSC arrays
// (col_ptr/row_idx/val) via a counting-sort style transpose.
template<typename val_type>
void smat_t<val_type>::csc_to_csr() { // {{{
// Histogram the row of every entry into row_ptr (shifted by one).
memset(row_ptr, 0, sizeof(size_t)*(rows+1));
for(size_t idx = 0; idx < nnz; idx++)
row_ptr[row_idx[idx]+1]++;
// Prefix-sum into starting offsets per row.
for(size_t r = 1; r <= rows; r++)
row_ptr[r] += row_ptr[r-1];
// Scatter entries; row_ptr[r] doubles as the insertion cursor for row r.
for(size_t c = 0; c < cols; c++) {
for(size_t idx = col_ptr[c]; idx != col_ptr[c+1]; idx++) {
size_t r = (size_t) row_idx[idx];
col_idx[row_ptr[r]] = c;
val_t[row_ptr[r]++] = val[idx];
}
}
// The cursors ended one row ahead; shift back to restore row_ptr.
for(size_t r = rows; r > 0; r--)
row_ptr[r] = row_ptr[r-1];
row_ptr[0] = 0;
} // }}}
// Recompute the maximum per-row and per-column nonzero counts.
template<typename val_type>
void smat_t<val_type>::update_max_nnz() { // {{{
	max_row_nnz = 0;
	max_col_nnz = 0;
	for(size_t r = 0; r < rows; r++)
		if(nnz_of_row(r) > max_row_nnz) max_row_nnz = nnz_of_row(r);
	for(size_t c = 0; c < cols; c++)
		if(nnz_of_col(c) > max_col_nnz) max_col_nnz = nnz_of_col(c);
} // }}}
// Open the triplet text file and seek to start_pos; `nnz_` is the number of
// entries next() is expected to deliver.
// FIX: on fopen failure the iterator previously kept nnz > 0, so the first
// next() call would fgets() through a NULL stream; nnz is now zeroed so
// next() reports "no more entry" instead of crashing.
template<typename val_type>
file_iterator_t<val_type>::file_iterator_t(size_t nnz_, const char* filename, size_t start_pos) { // {{{
	nnz = nnz_;
	fp = fopen(filename,"rb");
	if(fp == NULL) {
		fprintf(stderr, "Error: cannot read the file (%s)!!\n", filename);
		nnz = 0;
		return;
	}
	fseek(fp, start_pos, SEEK_SET);
} // }}}
// Parse the next "i j v" line (one-based indices in the file) and return it
// as a zero-based entry. When exhausted, reports an error and returns a
// dummy (0, 0, 0) entry.
template<typename val_type>
entry_t<val_type> file_iterator_t<val_type>::next() { // {{{
const int base10 = 10;
if(nnz > 0) {
--nnz;
if(fgets(&line[0], MAXLINE, fp)==NULL)
fprintf(stderr, "Error: reading error !!\n");
// strtol/strtod advance head_ptr past each parsed token in turn.
char *head_ptr = &line[0];
size_t i = strtol(head_ptr, &head_ptr, base10);
size_t j = strtol(head_ptr, &head_ptr, base10);
double v = strtod(head_ptr, &head_ptr);
// Convert the file's one-based indices to zero-based.
return entry_t<val_type>(i-1, j-1, (val_type)v);
} else {
fprintf(stderr, "Error: no more entry to iterate !!\n");
return entry_t<val_type>(0,0,0);
}
} // }}}
// Bind the iterator to M's CSR arrays (ROWMAJOR) or its CSC arrays
// (COLMAJOR, where rows/cols swap roles). No data is copied.
template<typename val_type>
smat_iterator_t<val_type>::smat_iterator_t(const smat_t<val_type>& M, major_t major) { // {{{
	const bool row_major = (major == ROWMAJOR);
	nnz = M.nnz;
	col_idx = row_major ? M.col_idx : M.row_idx;
	row_ptr = row_major ? M.row_ptr : M.col_ptr;
	val_t = row_major ? M.val_t : M.val;
	rows = row_major ? M.rows : M.cols;
	cols = row_major ? M.cols : M.rows;
	cur_idx = 0;
	cur_row = 0;
} // }}}
// Return the next stored entry in (row, col, value) order and advance.
template<typename val_type>
entry_t<val_type> smat_iterator_t<val_type>::next() { // {{{
// Skip over empty rows until cur_idx falls inside cur_row's range.
while (cur_idx >= row_ptr[cur_row+1])
cur_row++;
if (nnz > 0)
nnz--;
else
fprintf(stderr,"Error: no more entry to iterate !!\n");
entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]);
cur_idx++;
return ret;
} // }}}
// Bind to M's CSR arrays (ROWMAJOR) or CSC arrays (COLMAJOR) and restrict
// iteration to the given subset of rows (or columns). The subset is copied
// and sorted; nnz totals the entries in the selected rows/cols.
template<typename val_type>
smat_subset_iterator_t<val_type>::smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset, size_t size, bool remapping_, major_t major_) { // {{{
major = major_; remapping = remapping_;
col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
val_t = (major == ROWMAJOR)? M.val_t: M.val;
// With remapping the iterated dimension shrinks to the subset size.
rows = (major==ROWMAJOR)? (remapping?size:M.rows): (remapping?size:M.cols);
cols = (major==ROWMAJOR)? M.cols: M.rows;
this->subset.resize(size);
nnz = 0;
for(size_t i = 0; i < size; i++) {
unsigned idx = subset[i];
this->subset[i] = idx;
nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx);
}
// Entries must come out in ascending row (or col) order.
sort(this->subset.begin(), this->subset.end());
cur_row = 0; // position in `subset`, not a matrix row number
cur_idx = row_ptr[this->subset[cur_row]];
} // }}}
// Return the next entry within the subset and advance. With remapping the
// subset position (cur_row) is emitted as the index; otherwise the original
// row/col id is used. COLMAJOR swaps i and j in the produced entry.
// NOTE(review): once the final subset row is exhausted, the skip loop
// advances cur_row past the end of `subset` — callers appear expected to
// call next() exactly get_nnz() times; verify.
template<typename val_type>
entry_t<val_type> smat_subset_iterator_t<val_type>::next() { // {{{
// Skip subset rows that have no remaining entries.
while (cur_idx >= row_ptr[subset[cur_row]+1]) {
cur_row++;
cur_idx = row_ptr[subset[cur_row]];
}
if (nnz > 0)
nnz--;
else
fprintf(stderr,"Error: no more entry to iterate !!\n");
//entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]);
entry_t<val_type> ret_rowwise(remapping?cur_row:subset[cur_row], col_idx[cur_idx], val_t[cur_idx]);
entry_t<val_type> ret_colwise(col_idx[cur_idx], remapping?cur_row:subset[cur_row], val_t[cur_idx]);
//printf("%d %d\n", cur_row, col_idx[cur_idx]);
cur_idx++;
//return ret;
return major==ROWMAJOR? ret_rowwise: ret_colwise;
} // }}}
/*
H = X*W
X is an m*n
W is an n*k, row-majored array
H is an m*k, row-majored array
*/
// H = X*W: sparse (m x n) times dense row-major (n x k) into dense
// row-major H (m x k). Rows of H are computed independently in parallel.
template<typename val_type>
void smat_x_dmat(const smat_t<val_type> &X, const val_type* W, const size_t k, val_type *H) { // {{{
size_t m = X.rows;
// dynamic schedule: per-row work varies with each row's nnz
#pragma omp parallel for schedule(dynamic,50) shared(X,W,H)
for(size_t i = 0; i < m; i++) {
val_type *Hi = &H[k*i];
memset(Hi,0,sizeof(val_type)*k);
// Accumulate Xij * W[j,:] for every nonzero in row i (CSR traversal).
for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
const val_type Xij = X.val_t[idx];
const val_type *Wj = &W[X.col_idx[idx]*k];
for(unsigned t = 0; t < k; t++)
Hi[t] += Xij*Wj[t];
}
}
} // }}}
// H = X*W for dmat_t operands; both W and H must be row-major.
template<typename val_type>
void smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) { // {{{
	assert(W.is_rowmajor() && H.is_rowmajor());
	assert(W.cols == H.cols && X.cols == W.rows && X.rows == H.rows);
	// Delegate to the general kernel with a = 1, b = 0 (H0 is ignored).
	smat_x_dmat(1.0, X, W, 0.0, H, H);
} // }}}
/*
H = a*X*W + b H0
X is an m*n
W is an n*k, row-majored array
H is an m*k, row-majored array
*/
// H = a*X*W + b*H0
// X: m*n sparse, W: n*k row-major, H0/H: m*k row-major. H may alias H0
// (in-place update); the aliasing checks below skip the copy in that case.
template<typename val_type, typename T2, typename T3>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H) { // {{{
size_t m = X.rows;
val_type aa = (val_type) a;
val_type bb = (val_type) b;
// a == 0 fast path: H = b*H0 (or zero), no sparse product needed
if(a == T2(0)) {
if(bb == (val_type)0.0){
memset(H, 0, sizeof(val_type)*m*k);
return ;
} else {
if(H!=H0) {
do_copy(H0, H, m*k);
//memcpy(H, H0, sizeof(val_type)*m*k);
}
do_scale(bb, H, m*k);
}
return;
}
#pragma omp parallel for schedule(dynamic,64) shared(X, W, H, H0, aa,bb)
for(size_t i = 0; i < m; i++) {
val_type *Hi = &H[k*i];
// initialize row i to b*H0[i,:] (or zero) before accumulation
if(bb == (val_type)0.0)
memset(Hi, 0, sizeof(val_type)*k);
else {
if(Hi!=&H0[k*i])
do_copy(&H0[k*i], Hi, k);
do_scale(bb, Hi, k);
}
for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
const val_type Xij = X.val_t[idx];
const val_type *Wj = &W[X.col_idx[idx]*k];
for(size_t t = 0; t < k; t++)
Hi[t] += aa*Xij*Wj[t];
}
}
}// }}}
// H = a*X*W + H0 (forwarder with b fixed to 1).
template<typename val_type, typename T2>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H) { // {{{
smat_x_dmat(a, X, W, k, 1.0, H0, H);
} // }}}
// H = a*X*W + b*H0 for dmat_t operands, dispatching on the storage order of
// W and H: the fastest path (all row-major) forwards to the raw-pointer
// kernel; otherwise H is first set to b*H0 via H.assign() and a*X*W is
// accumulated entry-by-entry (or column-by-column via X.Xv when both W and
// H are column-major).
template<typename val_type, typename T2, typename T3>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { // {{{
assert(W.cols == H0.cols && W.cols == H.cols && X.cols == W.rows && X.rows == H0.rows && X.rows == H.rows);
if(W.is_rowmajor()) {
if(H.is_rowmajor()) {
if(H0.is_rowmajor()){
smat_x_dmat(a, X, W.data(), W.cols, b, H0.data(), H.data());
} else {
// H0 layout differs: materialize b*H0 into H first, then H += a*X*W
H.assign(b, H0);
smat_x_dmat(a, X, W.data(), W.cols, 1.0, H.data(), H.data());
}
} else { // H is col_major
H.assign(b, H0);
// H += aXW
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
for(size_t i = 0; i < X.rows; i++) {
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
size_t j = X.col_idx[idx];
const val_type &Xij = X.val_t[idx];
for(size_t t = 0; t < W.cols; t++)
H.at(i,t) += a*Xij*W.at(j,t);
}
}
}
} else { // W.is_colmajor
H.assign(b, H0);
if(H.is_colmajor()) {
// one sparse matrix-vector product per column
#pragma omp parallel for schedule(static)
for(size_t j = 0; j < W.cols; j++)
X.Xv(W[j], H[j]);
} else { // H.is row_major
// H += aXW
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
for(size_t i = 0; i < X.rows; i++) {
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
size_t j = X.col_idx[idx];
const val_type &Xij = X.val_t[idx];
for(size_t t = 0; t < W.cols; t++)
H.at(i,t) += a*Xij*W.at(j,t);
}
}
}
}
}// }}}
// H = a*X*W + H0 for dmat_t operands (forwarder with b fixed to 1).
template<typename val_type, typename T2>
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { // {{{
smat_x_dmat(a, X, W, 1.0, H0, H);
} // }}}
/*
* H = a*XW + b H0
* X is an m*n gmat
* W is an n*k dmat
* H is m*k dmat
*/
// H = a*X*W + b*H0 where X is a generic matrix (sparse, dense, or identity).
// Dispatches to the matching kernel; for identity X this reduces to
// H = b*H0 followed by H += a*W.
template<typename val_type, typename T2, typename T3>
void gmat_x_dmat(T2 a, const gmat_t<val_type>& X, const dmat_t<val_type>& W, T3 b, const dmat_t<val_type>& H0, dmat_t<val_type>& H) { // {{{
if(X.is_sparse())
smat_x_dmat(a, X.get_sparse(), W, b, H0, H);
else if(X.is_dense())
dmat_x_dmat(a, X.get_dense(), W, b, H0, H);
else if(X.is_identity()) {
H.assign(b, H0);
do_axpy(a, W, H);
}
} // }}}
/*
* H = XW
*
*/
// H = X*W where X is a generic matrix (sparse, dense, or identity).
// For identity X, H is simply a copy of W.
template<typename val_type>
void gmat_x_dmat(const gmat_t<val_type>& X, const dmat_t<val_type>& W, dmat_t<val_type>& H) { // {{{
if(X.is_sparse())
smat_x_dmat(X.get_sparse(), W, H);
else if(X.is_dense())
dmat_x_dmat(X.get_dense(), W, H);
else if(X.is_identity())
H.assign(W);
} // }}}
/*
trace(W^T X H)
X is an m*n, sparse matrix
W is an m*k, row-majored array
H is an n*k, row-major
*/
// Computes trace(W^T X H) = sum_{ij} X_ij * (W_i . H_j).
// X: m*n sparse, W: m*k row-major, H: n*k row-major.
// Accumulation is done in double regardless of val_type for accuracy.
template<typename val_type>
val_type trace_dmat_T_smat_dmat(const val_type *W, const smat_t<val_type> &X, const val_type *H, const size_t k) { // {{{
size_t m = X.rows;
double ret = 0;
#pragma omp parallel for schedule(dynamic,50) shared(X,H,W) reduction(+:ret)
for(size_t i = 0; i < m; i++) {
const val_type *Wi = &W[k*i];
for(long idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
const val_type *Hj = &H[X.col_idx[idx]*k];
double tmp=0;
// tmp = W_i . H_j for this nonzero's column j
for(size_t t = 0; t < k; t++)
tmp += Wi[t]*Hj[t];
ret += X.val_t[idx]*tmp;
}
}
return (val_type)ret;
} // }}}
// trace(W^T X H) for dmat_t operands. Two strategies depending on layout:
// column-major W and H parallelize over the k columns (each term is
// u^T X v for one column pair); otherwise parallelize over rows of X
// using element access via at().
template<typename val_type>
val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H) { // {{{
assert(W.cols == H.cols && W.rows == X.rows && H.rows == X.cols);
if(W.is_colmajor() && H.is_colmajor()) {
double ret = 0;
#pragma omp parallel for schedule(static) reduction(+:ret)
for(size_t t = 0; t < W.cols; t++) {
const dvec_t<val_type> &u = W[t];
const dvec_t<val_type> &v = H[t];
double local_sum = 0;
for(size_t i = 0; i < X.rows; i++) {
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++)
local_sum += X.val_t[idx]*u[i]*v[X.col_idx[idx]];
}
ret += local_sum;
}
return ret;
} else {
double ret= 0;
#pragma omp parallel for schedule(dynamic,64) reduction(+:ret)
for(size_t i = 0; i < X.rows; i++) {
double local_sum = 0;
for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) {
size_t j = X.col_idx[idx];
double sum = 0;
// sum = W_i . H_j
for(size_t t = 0; t < W.cols; t++)
sum += W.at(i,t)*H.at(j,t);
local_sum += sum * X.val_t[idx];
}
ret += local_sum;
}
return ret;
}
} // }}}
/*
trace(W^T diag(D) H)
D is an m*1 vector
W is an m*k, row-majored array
H is an m*k, row-major array
*/
// Computes trace(W^T diag(D) H) = sum_i D[i] * (W_i . H_i).
// D: m*1 vector, W/H: m*k row-major arrays.
// BUG FIX: the accumulation previously used do_dot_product(wi, wi, k) —
// i.e. it computed trace(W^T diag(D) W) and ignored H entirely (`hi` was
// declared but never used). It now dots row i of W with row i of H, as
// the documented contract (and the W/H wrapper overload) requires.
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const val_type *W, const val_type *D, const val_type *H, const size_t m, const size_t k) { // {{{
val_type *w = const_cast<val_type*>(W);
val_type *h = const_cast<val_type*>(H);
val_type *d = const_cast<val_type*>(D);
double ret = 0.0;
#pragma omp parallel for schedule(static) shared(w,h,d) reduction(+:ret)
for(size_t i = 0; i < m; i++) {
val_type *wi = &w[i*k], *hi = &h[i*k];
ret += do_dot_product(wi, hi, k) * d[i];
}
return (val_type)ret;
} // }}}
// trace(W^T diag(D) H) for dmat_t/dvec_t operands (row-major W and H
// required); forwards to the raw-pointer implementation.
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dvec_t<val_type> &D, const dmat_t<val_type> &H) { // {{{
assert(W.rows == H.rows && W.rows == D.len && W.cols == H.cols);
assert(W.is_rowmajor() && H.is_rowmajor());
return trace_dmat_T_diag_dmat(W.data(),D.data(),H.data(),W.rows,W.cols);
} // }}}
// Convenience overload: D supplied as a dmat_t, viewed as a vector.
template<typename val_type>
val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dmat_t<val_type> &D, const dmat_t<val_type> &H) { // {{{
return trace_dmat_T_diag_dmat(W, dvec_t<val_type>(D.get_view()), H);
} // }}}
//------------------ Implementation of zip_it -----------------------
// helpler functions and classes for zip_it
// By-value pair holding copies of the two zipped elements; constructed from
// a zip_ref when algorithms (e.g. std::sort) need a value temporary.
// Ordering and equality compare the first component only, mirroring zip_ref.
template<class T1, class T2>
struct zip_body { // {{{
	T1 x; T2 y;
	zip_body(const zip_ref<T1,T2>& other): x(*other.x), y(*other.y){}
	// all comparisons take const& and are const-qualified
	// (previously only operator< was const-correct)
	bool operator<(const zip_body &other) const {return x < other.x;}
	bool operator>(const zip_body &other) const {return x > other.x;}
	bool operator==(const zip_body &other) const {return x == other.x;}
	bool operator!=(const zip_body &other) const {return x != other.x;}
}; // }}}
template<class T1, class T2>
struct zip_ref { // {{{
T1 *x; T2 *y;
zip_ref(T1 &x, T2 &y): x(&x), y(&y){}
zip_ref(zip_body<T1,T2>& other): x(&other.x), y(&other.y){}
bool operator<(zip_ref other) const {return *x < *other.x;}
bool operator>(zip_ref other) const {return *x > *other.x;}
bool operator==(zip_ref other) const {return *x == *other.x;}
bool operator!=(zip_ref other) const {return *x != *other.x;}
zip_ref& operator=(zip_ref& other) {
*x = *other.x; *y = *other.y;
return *(this);
}
zip_ref& operator=(zip_body<T1,T2> other) {
*x = other.x; *y = other.y;
return *(this);
}
}; // }}}
template<class T1, class T2>
void swap(zip_ref<T1,T2> a, zip_ref<T1,T2> b) { // {{{
std::swap(*(a.x),*(b.x));
std::swap(*(a.y),*(b.y));
} // }}}
// Random-access "zip" iterator over two parallel sequences. Dereferencing
// yields a zip_ref proxy; ordering, equality, and distance are defined by
// the first iterator only (the second is assumed to advance in lockstep).
template<class IterT1, class IterT2>
struct zip_it { // {{{
	typedef std::random_access_iterator_tag iterator_category;
	typedef typename std::iterator_traits<IterT1>::value_type T1;
	typedef typename std::iterator_traits<IterT2>::value_type T2;
	typedef zip_body<T1,T2> value_type;
	typedef zip_ref<T1,T2> reference;
	typedef zip_body<T1,T2>* pointer;
	typedef ptrdiff_t difference_type;
	IterT1 x;
	IterT2 y;
	zip_it(IterT1 x, IterT2 y): x(x), y(y){}
	reference operator*() {return reference(*x, *y);}
	reference operator[](const difference_type n) const {return reference(x[n],y[n]);}
	zip_it& operator++() {++x; ++y; return *this;} // prefix ++
	zip_it& operator--() {--x; --y; return *this;} // prefix --
	zip_it operator++(int) {return zip_it(x++,y++);} // postfix ++
	zip_it operator--(int) {return zip_it(x--,y--);} // postfix --
	// arithmetic and comparisons are now const-qualified (previously
	// non-const), so const iterators satisfy standard algorithm requirements
	zip_it operator+(const difference_type n) const {return zip_it(x+n,y+n);}
	zip_it operator-(const difference_type n) const {return zip_it(x-n,y-n);}
	zip_it& operator+=(const difference_type n) {x+=n; y+=n; return *this;}
	zip_it& operator-=(const difference_type n) {x-=n; y-=n; return *this;}
	bool operator<(const zip_it& other) const {return x<other.x;}
	bool operator>(const zip_it& other) const {return x>other.x;}
	bool operator==(const zip_it& other) const {return x==other.x;}
	bool operator!=(const zip_it& other) const {return x!=other.x;}
	difference_type operator-(const zip_it& other) const {return x-other.x;}
}; // }}}
// Factory helper: deduce the two iterator types and build a zip_it.
template<class IterT1, class IterT2>
zip_it<IterT1, IterT2> zip_iter(IterT1 first, IterT2 second) { // {{{
	return zip_it<IterT1, IterT2>(first, second);
} // }}}
//}; // end of namespace rofu
#undef gmat_t
#undef eye_t
#undef smat_t
#undef dmat_t
#undef dvec_t
#endif // SPARSE_MATRIX_H
|
mbir_ct.c |
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>
//#include <time.h>
#include <sys/time.h>
#include "mbir_ct.h"
#include "MBIRModularDefs.h"
#include "MBIRModularUtils.h"
#include "allocate.h"
#include "A_comp.h"
#include "initialize.h"
#include "recon3d.h"
/* Internal Functions */
void readCmdLine(int argc, char *argv[], struct CmdLine *cmdline);
void procCmdLine(int argc, char *argv[], struct CmdLine *cmdline);
void printCmdLineUsage(char *ExecFileName);
int CmdLineHelpOption(char *string);
void setNumSliceDigits(char *basename, char *ext, int slice, struct SinoParams3DParallel *sinoparams, struct ImageParams3D *imgparams);
/* Driver for super-voxel MBIR CT reconstruction. Depending on the command
 * line it (1) pre-computes and stores the system matrix, (2) projects an
 * input image set, or (3) performs a full reconstruction (QGGMRF or
 * Plug & Play). procCmdLine() decides which mode applies; see
 * printCmdLineUsage() for the three command forms. Returns 0 on success;
 * exits with -1 on any I/O failure. */
int main(int argc, char *argv[])
{
struct CmdLine cmdline;
struct Image3D Image;
struct Image3D ProxMap;
struct Sino3DParallel sinogram;
struct ReconParams reconparams;
struct SVParams svpar;
struct AValues_char **A_Padded_Map;
float *Aval_max_ptr;
char *ImageReconMask; /* Image reconstruction mask (determined by ROI) */
char fname[1024];
struct timeval tm1,tm2;
unsigned long long tdiff;
int i,j,jz;
float **e; /* per-slice sinogram-domain buffers: projection Ax, later error y-Ax */
readCmdLine(argc, argv, &cmdline);
if(cmdline.verboseLevel)
{
fprintf(stdout,"SUPER-VOXEL MBIR RECONSTRUCTION FOR 3D PARALLEL-BEAM CT\n");
fprintf(stdout,"---- build time: %s, %s ----\n", __DATE__, __TIME__);
}
procCmdLine(argc, argv, &cmdline);
/* Read image/sino parameter files */
ReadSinoParams3DParallel(cmdline.SinoParamsFile,&sinogram.sinoparams);
ReadImageParams3D(cmdline.ImageParamsFile,&Image.imgparams);
if(cmdline.verboseLevel>1)
{
printSinoParams3DParallel(&sinogram.sinoparams);
printImageParams3D(&Image.imgparams);
}
if(cmdline.reconFlag)
{
ReadReconParams(cmdline.ReconParamsFile,&reconparams);
NormalizePriorWeights3D(&reconparams);
if(cmdline.verboseLevel>1)
{
if(cmdline.reconFlag == MBIR_MODULAR_RECONTYPE_QGGMRF_3D)
printReconParamsQGGMRF3D(&reconparams);
if(cmdline.reconFlag == MBIR_MODULAR_RECONTYPE_PandP)
printReconParamsPandP(&reconparams);
}
/* the command line (not the params file) is authoritative for recon type */
if(reconparams.ReconType != cmdline.reconFlag)
{
fprintf(stdout,"**\nWarning: \"PriorModel\" field in reconparams file doesn't agree with\n");
fprintf(stdout,"Warning: what the command line is doing. Proceeding anyway.\n**\n");
reconparams.ReconType = cmdline.reconFlag;
}
}
initSVParams(&svpar, Image.imgparams, sinogram.sinoparams); /* Initialize/allocate SV parameters */
if(cmdline.verboseLevel>1)
fprintf(stdout,"\n");
/* The image parameters specify the relevant slice range to reconstruct, so re-set the */
/* relevant sinogram parameters so it pulls the correct slices and indexes them consistently */
sinogram.sinoparams.NSlices = Image.imgparams.Nz;
sinogram.sinoparams.FirstSliceNumber = Image.imgparams.FirstSliceNumber;
/* cache frequently used dimensions in locals */
int NvNc = sinogram.sinoparams.NViews * sinogram.sinoparams.NChannels;
int Nxy = Image.imgparams.Nx * Image.imgparams.Ny;
int Nz = Image.imgparams.Nz;
int FirstSliceNumber = Image.imgparams.FirstSliceNumber;
int SVLength = svpar.SVLength;
int Nsv = svpar.Nsv;
/* Detect the number of slice number digits in input file names */
if(cmdline.reconFlag)
setNumSliceDigits(cmdline.SinoDataFile,"2Dsinodata",FirstSliceNumber,&sinogram.sinoparams,&Image.imgparams);
else if(cmdline.readInitImageFlag)
setNumSliceDigits(cmdline.InitImageFile,"2Dimgdata",FirstSliceNumber,&sinogram.sinoparams,&Image.imgparams);
int NumSliceDigits = Image.imgparams.NumSliceDigits;
/* Allocate and generate recon mask based on ROIRadius */
ImageReconMask = GenImageReconMask(&Image.imgparams);
/* Read/compute/write System Matrix */
A_Padded_Map = (struct AValues_char **)multialloc(sizeof(struct AValues_char),2,Nsv,(2*SVLength+1)*(2*SVLength+1));
Aval_max_ptr = (float *) get_spc(Nxy,sizeof(float));
if(cmdline.readAmatrixFlag)
{
sprintf(fname,"%s.2Dsvmatrix",cmdline.SysMatrixFile);
if(cmdline.verboseLevel)
fprintf(stdout,"Reading system matrix...\n");
readAmatrix(fname, A_Padded_Map, Aval_max_ptr, &Image.imgparams, &sinogram.sinoparams, svpar);
}
else
{
if(cmdline.verboseLevel) {
fprintf(stdout,"Computing system matrix...\n");
gettimeofday(&tm1,NULL);
}
A_comp(A_Padded_Map,Aval_max_ptr,svpar,&sinogram.sinoparams,ImageReconMask,&Image.imgparams);
if(cmdline.verboseLevel) {
gettimeofday(&tm2,NULL);
/* elapsed wall-clock time in milliseconds */
tdiff = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000;
fprintf(stdout,"\tmatrix time = %llu ms\n",tdiff);
}
}
if(cmdline.writeAmatrixFlag)
{
sprintf(fname,"%s.2Dsvmatrix",cmdline.SysMatrixFile);
if(cmdline.verboseLevel>1)
fprintf(stdout,"Writing system matrix %s\n",fname);
else if(cmdline.verboseLevel)
fprintf(stdout,"Writing system matrix...\n");
writeAmatrix(fname,A_Padded_Map,Aval_max_ptr,&Image.imgparams,&sinogram.sinoparams,svpar);
}
/* Initialize image and forward project, if necessary */
if(cmdline.reconFlag || cmdline.writeProjectionFlag)
{
/* Initialize image */
AllocateImageData3D(&Image);
if(cmdline.readInitImageFlag)
{
if(cmdline.verboseLevel)
fprintf(stdout,"Reading initial image...\n");
ReadImage3D(cmdline.InitImageFile,&Image);
}
else
initConstImage(&Image, ImageReconMask, reconparams.InitImageValue, 0);
/* Initialize Forward Projection of initial image */
e = (float **)multialloc(sizeof(float),2,sinogram.sinoparams.NSlices,NvNc);
if(cmdline.readInitProjectionFlag)
{
for(jz=0;jz<Nz;jz++)
{
sprintf(fname,"%s_slice%.*d.2Dprojection",cmdline.inputProjectionFile,NumSliceDigits,jz+FirstSliceNumber);
if(ReadFloatArray(fname,e[jz],NvNc)) {
fprintf(stderr,"Error: can't read %s\n",fname);
exit(-1);
}
}
}
else /* Compute initial projection */
{
if(cmdline.verboseLevel) {
fprintf(stdout,"Projecting image...\n");
gettimeofday(&tm1,NULL);
}
if(cmdline.readInitImageFlag)
{
/* here we need to project each slice */
#pragma omp parallel for schedule(dynamic)
for(jz=0;jz<Nz;jz++)
forwardProject2D(e[jz],Image.image[jz],A_Padded_Map,Aval_max_ptr,&sinogram.sinoparams,&Image.imgparams,svpar);
}
else /* if IC not provided, only need to project 1st slice and copy */
{
if(reconparams.InitImageValue==0.0f)
{
for(i=0; i<NvNc; i++)
e[0][i] = 0.0f;
}
else
{
forwardProject2D(e[0],Image.image[0],A_Padded_Map,Aval_max_ptr,&sinogram.sinoparams,&Image.imgparams,svpar);
for(jz=1;jz<Nz;jz++)
memcpy(e[jz],e[0],NvNc*sizeof(float));
}
}
if(cmdline.verboseLevel>1) {
gettimeofday(&tm2,NULL);
tdiff = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000;
fprintf(stdout,"\tprojection time = %llu ms\n",tdiff);
}
}
}
/***** Reconstruction mode *****/
if(cmdline.reconFlag)
{
/* Allocate and Read sinogram data */
AllocateSinoData3DParallel(&sinogram);
ReadSinoData3DParallel(cmdline.SinoDataFile, &sinogram);
if(cmdline.SinoWeightsFileFlag)
{
ReadWeights3D(cmdline.SinoWeightsFile, &sinogram);
reconparams.weightType = 0;
}
else if(reconparams.weightType < 1) // if weightType is expecting file input, revert to default
reconparams.weightType = 1;
ComputeSinoWeights(sinogram,reconparams); // either compute internally, or scale input by 1/SigmaY^2
/* Read Proximal map if necessary */
if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP)
{
/* proximal map shares all geometry with the image being reconstructed */
ProxMap.imgparams.Nx = Image.imgparams.Nx;
ProxMap.imgparams.Ny = Image.imgparams.Ny;
ProxMap.imgparams.Nz = Image.imgparams.Nz;
ProxMap.imgparams.FirstSliceNumber = Image.imgparams.FirstSliceNumber;
ProxMap.imgparams.NumSliceDigits = Image.imgparams.NumSliceDigits;
AllocateImageData3D(&ProxMap);
ReadImage3D(cmdline.ProxMapImageFile,&ProxMap);
reconparams.proximalmap = ProxMap.image; // **ptr to proximal map image
}
/* Start Reconstruction */
if(cmdline.verboseLevel)
{
fprintf(stdout,"Reconstructing...\n");
gettimeofday(&tm1,NULL);
}
/* "e" will hold the sinogram error (y-Ax) during reconstruction */
for(jz=0; jz<Nz; jz++)
for(i=0; i<NvNc; i++)
e[jz][i] = sinogram.sino[jz][i]-e[jz][i];
MBIRReconstruct3D(&Image,&sinogram,e,reconparams,svpar,A_Padded_Map,Aval_max_ptr,ImageReconMask,cmdline.verboseLevel);
if(cmdline.verboseLevel)
{
gettimeofday(&tm2,NULL);
tdiff = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000;
fprintf(stdout,"\tReconstruction time = %llu ms\n",tdiff);
}
/* Write out reconstructed image(s) */
if(cmdline.verboseLevel)
fprintf(stdout,"Writing image files...\n");
WriteImage3D(cmdline.ReconImageFile, &Image);
if(cmdline.writeProjectionFlag) /* flip it back to get projection Ax */
{
for(jz=0; jz<Nz; jz++)
for(i=0; i<NvNc; i++)
e[jz][i] = sinogram.sino[jz][i]-e[jz][i];
}
FreeSinoData3DParallel(&sinogram);
if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP)
FreeImageData3D(&ProxMap);
}
/* Write Projection of image state if requested */
if(cmdline.writeProjectionFlag)
{
if(cmdline.verboseLevel)
fprintf(stdout,"Writing projection to file...\n");
for(jz=0; jz<Nz; jz++)
{
sprintf(fname,"%s_slice%.*d.2Dprojection",cmdline.outputProjectionFile,NumSliceDigits,jz+FirstSliceNumber);
if( WriteFloatArray(fname,e[jz],NvNc) ) {
fprintf(stderr,"Error: can't open file %s for writing\n",fname);
exit(-1);
}
}
}
if(cmdline.reconFlag || cmdline.writeProjectionFlag)
{
multifree(e,2);
FreeImageData3D(&Image);
}
/* Free SV memory */
for(j=0;j<Nsv;j++) free((void *)svpar.bandMinMap[j].bandMin);
for(j=0;j<Nsv;j++) free((void *)svpar.bandMaxMap[j].bandMax);
free((void *)svpar.bandMinMap);
free((void *)svpar.bandMaxMap);
/* Free system matrix */
for(i=0;i<Nsv;i++)
for(j=0;j<(2*SVLength+1)*(2*SVLength+1);j++)
if(A_Padded_Map[i][j].length>0)
{
free((void *)A_Padded_Map[i][j].val);
free((void *)A_Padded_Map[i][j].pieceWiseMin);
free((void *)A_Padded_Map[i][j].pieceWiseWidth);
}
multifree(A_Padded_Map,2);
free((void *)Aval_max_ptr);
free((void *)ImageReconMask);
if(cmdline.verboseLevel)
fprintf(stdout,"Done.\n");
return(0);
}
/* Read Command-line */
/* Read Command-line: set defaults, then parse options into the CmdLine
 * struct via getopt(). Prints usage and exits(0) if no arguments or a help
 * flag is given; exits(-1) on any unrecognized option. */
void readCmdLine(int argc, char *argv[], struct CmdLine *cmdline)
{
	/* BUG FIX: was 'char ch'. getopt() returns int, and EOF (-1) does not
	 * survive the round-trip through plain char on platforms where char is
	 * unsigned, causing an infinite loop (and option chars > 127 would be
	 * mangled on signed-char platforms). */
	int ch;

	/* set defaults */
	cmdline->SinoParamsFileFlag=0;
	cmdline->ImageParamsFileFlag=0;
	cmdline->ReconParamsFileFlag=0;
	cmdline->SinoDataFileFlag=0;
	cmdline->SinoWeightsFileFlag=0;
	cmdline->ReconImageFileFlag=0;
	cmdline->SysMatrixFileFlag=0;
	cmdline->reconFlag = MBIR_MODULAR_RECONTYPE_QGGMRF_3D;
	cmdline->readInitImageFlag=0;
	cmdline->readInitProjectionFlag=0;
	cmdline->writeProjectionFlag=0;
	cmdline->verboseLevel=1;

	/* Print usage statement if no arguments, or help argument given */
	if(argc==1 || CmdLineHelpOption(argv[1]))
	{
		//fprintf(stdout,"Printing usage statement for %s\n",argv[0]);
		printCmdLineUsage(argv[0]);
		exit(0);
	}

	/* get options */
	while ((ch = getopt(argc, argv, "i:j:k:s:w:r:m:t:e:f:p:v:")) != EOF)
	{
		switch (ch)
		{
			case 'i':
			{
				cmdline->ImageParamsFileFlag=1;
				sprintf(cmdline->ImageParamsFile, "%s", optarg);
				break;
			}
			case 'j':
			{
				cmdline->SinoParamsFileFlag=1;
				sprintf(cmdline->SinoParamsFile, "%s", optarg);
				break;
			}
			case 'k':
			{
				cmdline->ReconParamsFileFlag=1;
				sprintf(cmdline->ReconParamsFile, "%s", optarg);
				break;
			}
			case 's':
			{
				cmdline->SinoDataFileFlag=1;
				sprintf(cmdline->SinoDataFile, "%s", optarg);
				break;
			}
			case 'w':
			{
				cmdline->SinoWeightsFileFlag=1;
				sprintf(cmdline->SinoWeightsFile, "%s", optarg);
				break;
			}
			case 'r':
			{
				cmdline->ReconImageFileFlag=1;
				sprintf(cmdline->ReconImageFile, "%s", optarg);
				break;
			}
			case 'm':
			{
				cmdline->SysMatrixFileFlag=1;
				sprintf(cmdline->SysMatrixFile, "%s", optarg);
				break;
			}
			case 't':
			{
				cmdline->readInitImageFlag=1;
				sprintf(cmdline->InitImageFile, "%s", optarg);
				break;
			}
			case 'e':
			{
				cmdline->readInitProjectionFlag=1;
				sprintf(cmdline->inputProjectionFile, "%s", optarg);
				break;
			}
			case 'f':
			{
				cmdline->writeProjectionFlag=1;
				sprintf(cmdline->outputProjectionFile, "%s", optarg);
				break;
			}
			case 'p':
			{
				/* -p both selects the proximal (Plug & Play) prior and names the map */
				cmdline->reconFlag = MBIR_MODULAR_RECONTYPE_PandP;
				sprintf(cmdline->ProxMapImageFile, "%s", optarg);
				break;
			}
			case 'v':
			{
				sscanf(optarg,"%hhi",&cmdline->verboseLevel);
				break;
			}
			default:
			{
				//fprintf(stderr,"%s: invalid option '%c'\n",argv[0],ch); //getopt does this already
				fprintf(stderr,"Try '%s -help' for more information.\n",argv[0]);
				exit(-1);
				break;
			}
		}
	}
}
/* Process Command-line */
/* Process Command-line: validate the parsed options, decide the operating
 * mode (reconstruction / projection-only / matrix pre-computation) by
 * setting readAmatrixFlag, writeAmatrixFlag, and clearing inconsistent
 * flags, then report the plan and file names at verboseLevel>1.
 * Exits(-1) when mandatory options are missing or the mode is ambiguous. */
void procCmdLine(int argc, char *argv[], struct CmdLine *cmdline)
{
if(cmdline->verboseLevel>1)
fprintf(stdout,"Parsing command line...\n");
/* Check for mandatory arguments */
if(!cmdline->SinoParamsFileFlag || !cmdline->ImageParamsFileFlag){
fprintf(stderr,"Error: Either sinoparams or imgparams (-i,-j) file wasn't specified\n");
fprintf(stderr,"Try '%s -help' for more information.\n",argv[0]);
exit(-1);
}
/* Determine what to do based on supplied options
* cmdline->reconFlag
* cmdline->readInitImageFlag
* cmdline->readInitProjectionFlag
* cmdline->writeProjectionFlag
*/
cmdline->readAmatrixFlag=0;
cmdline->writeAmatrixFlag=0;
if(cmdline->ReconImageFileFlag) /* reconstruction mode */
{
if(cmdline->SysMatrixFileFlag)
cmdline->readAmatrixFlag=1;
/* a supplied initial projection only makes sense with an initial image */
if(cmdline->readInitProjectionFlag && !cmdline->readInitImageFlag)
cmdline->readInitProjectionFlag = 0;
if(!cmdline->ReconParamsFileFlag || !cmdline->SinoDataFileFlag)
{
fprintf(stderr,"Error: Either input data or reconstruction parameters weren't specified\n");
fprintf(stderr,"Try '%s -help' for more information.\n",argv[0]);
exit(-1);
}
}
else /* precompute matrix or project input image */
{
cmdline->reconFlag=0;
cmdline->readInitProjectionFlag=0;
if(cmdline->writeProjectionFlag && !cmdline->readInitImageFlag)
cmdline->writeProjectionFlag = 0;
if(cmdline->writeProjectionFlag && cmdline->readInitImageFlag) /* projection mode */
{
if(cmdline->SysMatrixFileFlag)
cmdline->readAmatrixFlag=1;
}
else /* pre-compute matrix */
{
if(cmdline->SysMatrixFileFlag)
cmdline->writeAmatrixFlag=1;
else
{
fprintf(stderr,"Error: From the given command options, not sure what you want to do.\n");
fprintf(stderr,"Try '%s -help' for more information.\n",argv[0]);
exit(-1);
}
}
}
/* Print output and check errors of above parsing sequence */
if(cmdline->verboseLevel>1)
{
if(cmdline->reconFlag)
{
fprintf(stdout,"-> will perform reconstruction ");
if(cmdline->reconFlag == MBIR_MODULAR_RECONTYPE_QGGMRF_3D)
fprintf(stdout,"(QGGMRF)\n");
if(cmdline->reconFlag == MBIR_MODULAR_RECONTYPE_PandP)
fprintf(stdout,"(Plug & Play)\n");
if(cmdline->readAmatrixFlag)
fprintf(stdout,"-> will read system matrix from file\n");
else
{
fprintf(stdout,"-> will compute system matrix\n");
fprintf(stdout," *** NOTE if you precompute/store the system matrix, any further reconstruction\n");
fprintf(stdout," *** with the same image/sinogram in-slice dimensions will execute MUCH faster.\n");
fprintf(stdout," *** See help (-m option)\n");
// fprintf(stdout,"***80 columns*******************************************************************\n\n");
}
if(!cmdline->SinoWeightsFileFlag)
fprintf(stdout,"-> will compute sinogram weights internally (no file provided)\n");
if(cmdline->readInitImageFlag)
fprintf(stdout,"-> will read initial condition from file(s)\n");
if(cmdline->readInitProjectionFlag)
fprintf(stdout,"-> will read projection of initial condition\n");
else
fprintf(stdout,"-> will compute forward projection of initial condition\n");
if(cmdline->writeProjectionFlag)
fprintf(stdout,"-> will save projection of output image state to file(s)\n");
}
else if(cmdline->writeAmatrixFlag || cmdline->writeProjectionFlag)
{
fprintf(stdout,"-> no reconstruction\n");
if(cmdline->writeAmatrixFlag)
fprintf(stdout,"-> will compute system matrix and write to file\n");
if(cmdline->writeProjectionFlag)
fprintf(stdout,"-> will compute projection and write to file(s)\n");
if(cmdline->ReconParamsFileFlag || cmdline->SinoDataFileFlag || cmdline->SinoWeightsFileFlag || cmdline->readInitProjectionFlag)
fprintf(stdout,"Note some command line options are being ignored.\n");
}
fprintf(stdout,"\n");
fprintf(stdout,"Filenames provided:\n");
if(cmdline->SinoParamsFileFlag)
fprintf(stdout," Sino params = %s.sinoparams\n",cmdline->SinoParamsFile);
if(cmdline->ImageParamsFileFlag)
fprintf(stdout," Image params = %s.imgparams\n",cmdline->ImageParamsFile);
if(cmdline->ReconParamsFileFlag)
fprintf(stdout," Recon params = %s.reconparams\n",cmdline->ReconParamsFile);
if(cmdline->SinoDataFileFlag)
fprintf(stdout," Sinogram data = %s_sliceNNN.2Dsinodata\n",cmdline->SinoDataFile);
if(cmdline->SinoWeightsFileFlag)
fprintf(stdout," Weight data = %s_sliceNNN.2Dweightdata\n",cmdline->SinoWeightsFile);
if(cmdline->ReconImageFileFlag)
fprintf(stdout," Output images = %s_sliceNNN.2Dimgdata\n",cmdline->ReconImageFile);
if(cmdline->readInitImageFlag)
fprintf(stdout," Initial image = %s_sliceNNN.2Dimgdata\n",cmdline->InitImageFile);
if(cmdline->SysMatrixFileFlag)
fprintf(stdout," System matrix = %s.2Dsvmatrix\n",cmdline->SysMatrixFile);
if(cmdline->readInitProjectionFlag)
fprintf(stdout," Initial projection = %s.2Dprojection\n",cmdline->inputProjectionFile);
if(cmdline->writeProjectionFlag)
fprintf(stdout," Output projection = %s_sliceNNN.2Dprojection\n",cmdline->outputProjectionFile);
}
}
/* Determine how many digits the slice-index field uses in existing data
 * files named "<basename>_slice<NNN...>.<ext>". Probes from the maximum
 * digit count downward and returns the first count for which the file can
 * be opened, or 0 if no candidate file exists. */
int NumSliceDigits(char *basename, char *ext, int slice)
{
	char fname[1024];
	int Ndigits;

	for(Ndigits = MBIR_MODULAR_MAX_NUMBER_OF_SLICE_DIGITS; Ndigits > 0; Ndigits--)
	{
		FILE *fp;
		sprintf(fname,"%s_slice%.*d.%s",basename, Ndigits, slice, ext);
		//printf("%s\n",fname);
		fp = fopen(fname,"r");
		if(fp != NULL) {
			fclose(fp);
			break;
		}
	}
	return(Ndigits);
}
/* Detect the slice-number digit count of the input files (via
 * NumSliceDigits) and record it in both the sinogram and image parameter
 * structs. Prints an explanatory message and exits(-1) if no matching
 * file can be found. */
void setNumSliceDigits(
char *basename,
char *ext,
int slice,
struct SinoParams3DParallel *sinoparams,
struct ImageParams3D *imgparams)
{
int Ndigits;
if( (Ndigits = NumSliceDigits(basename,ext,slice)) > 0 )
{
sinoparams->NumSliceDigits = Ndigits;
imgparams->NumSliceDigits = Ndigits;
}
else
{
fprintf(stderr,"Error: Can't determine number of slice digits from given input file.\n");
fprintf(stderr,"* Looking for file with this format: %s_slice%d.%s\n",basename,slice,ext);
fprintf(stderr,"* where the slice number can contain leading zeros but no spaces.\n");
exit(-1);
}
}
/* Print the program banner with the compile-time build stamp to stdout. */
void printBanner(void)
{
	printf("MBIR RECONSTRUCTION FOR 3D PARALLEL-BEAM CT\n");
	printf("build time: %s, %s\n\n", __DATE__, __TIME__);
}
/* Print the full command-line usage statement to stdout, covering the three
 * invocation forms (matrix pre-computation, reconstruction, projection-only)
 * and the slice-file naming convention. ExecFileName is echoed as the
 * program name in the examples. */
void printCmdLineUsage(char *ExecFileName)
{
// fprintf(stdout,"***80 columns*******************************************************************\n\n");
fprintf(stdout,"Command Line Help\n\n");
fprintf(stdout,"There are three forms for the command line. One pre-computes and stores the\n");
fprintf(stdout,"system matrix (saves time for other reconstructions w/ same geometry).\n");
fprintf(stdout,"The second reconstructs the input sinogram, and the third computes the\n");
fprintf(stdout,"projection of an input image set.\n");
fprintf(stdout,"\n");
fprintf(stdout,"Pre-compute system matrix: (printed on multiple lines for clarity)\n");
fprintf(stdout,"\n");
fprintf(stdout," %s\n",ExecFileName);
fprintf(stdout,"\t-i <filename>[.imgparams] : Input image parameters\n");
fprintf(stdout,"\t-j <filename>[.sinoparams] : Input sinogram parameters\n");
fprintf(stdout,"\t-m <filename>[.2Dsvmatrix] : Output matrix file\n");
fprintf(stdout,"\n");
// fprintf(stdout,"***80 columns*******************************************************************\n\n");
fprintf(stdout,"Perform reconstruction:\n");
fprintf(stdout,"\n");
fprintf(stdout," %s\n",ExecFileName);
fprintf(stdout,"\t-i <filename>[.imgparams] : Input image parameters\n");
fprintf(stdout,"\t-j <filename>[.sinoparams] : Input sinogram parameters\n");
fprintf(stdout,"\t-k <filename>[.reconparams] : Reconstruction parameters\n");
fprintf(stdout,"\t-s <baseFilename> : Input sinogram projection file(s)\n");
fprintf(stdout,"\t-r <baseFilename> : Output reconstruced image file(s)\n");
fprintf(stdout," (following are optional)\n");
fprintf(stdout,"\t-m <filename>[.2Dsvmatrix] : INPUT matrix file (params must correspond!)\n");
fprintf(stdout,"\t-w <baseFilename> : Input sinogram weight file(s)\n");
fprintf(stdout,"\t-t <baseFilename> : Input initial condition image(s)\n");
fprintf(stdout,"\t-e <baseFilename> : Input projection of initial condition\n");
fprintf(stdout,"\t-f <baseFilename> : Output projection of final image state\n");
fprintf(stdout,"\t-p <baseFilename> : Proximal map image(s) for Plug & Play\n");
fprintf(stdout,"\t : ** -p specifies to use proximal prior\n");
fprintf(stdout,"\t : ** generally use with -t -e -f\n");
// fprintf(stdout,"***80 columns*******************************************************************\n\n");
fprintf(stdout,"\t-v <verbose level> : 0:quiet, 1:status info (default), 2:more info\n");
fprintf(stdout,"\n");
fprintf(stdout,"Compute projection of input only:\n");
fprintf(stdout,"\n");
fprintf(stdout," %s\n",ExecFileName);
fprintf(stdout,"\t-i <filename>[.imgparams] : Input image parameters\n");
fprintf(stdout,"\t-j <filename>[.sinoparams] : Input sinogram parameters\n");
fprintf(stdout,"\t-t <baseFilename> : Input image set\n");
fprintf(stdout,"\t-f <baseFilename> : Output projection\n");
fprintf(stdout," (following are optional)\n");
fprintf(stdout,"\t-m <filename>[.2Dsvmatrix] : INPUT matrix file (params must correspond!)\n");
fprintf(stdout,"\n");
fprintf(stdout,"In the above arguments, the exensions given in the '[]' symbols must be part of\n");
fprintf(stdout,"the file names but should be omitted from the command line.\n");
fprintf(stdout,"For all the arguments specifying <baseFilename>, the relevant 3D data is split\n");
fprintf(stdout,"across files, one file per slice. The file naming convention is as follows,\n");
fprintf(stdout,"depending on the data contents:\n");
fprintf(stdout,"\n");
fprintf(stdout,"\t<baseFilename>_slice<sliceIndex>.2Dimgdata\n");
fprintf(stdout,"\t<baseFilename>_slice<sliceIndex>.2Dsinodata\n");
fprintf(stdout,"\t<baseFilename>_slice<sliceIndex>.2Dweightdata\n");
fprintf(stdout,"\t<baseFilename>_slice<sliceIndex>.2Dprojection\n");
fprintf(stdout,"\n");
fprintf(stdout,"where <sliceIndex> (skip '<>' symbols) is a non-negative integer including\n");
fprintf(stdout,"leading zeros and no spaces (e.g. 0000 to 1023). The number of digits\n");
fprintf(stdout,"is flexible (up to %d) but must be consistent.\n",MBIR_MODULAR_MAX_NUMBER_OF_SLICE_DIGITS);
fprintf(stdout,"\n");
}
/* Return 1 if the given argument is any recognized help flag
 * ("-h", "-help", "--help", or "help"), otherwise 0. */
int CmdLineHelpOption(char *string)
{
	static const char *help_flags[] = { "-h", "-help", "--help", "help" };
	size_t i;
	for(i = 0; i < sizeof(help_flags)/sizeof(help_flags[0]); i++) {
		if(strcmp(string, help_flags[i]) == 0)
			return 1;
	}
	return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* OpenMP demo: run a 5-iteration loop across 5 threads using the combined
 * "parallel for" construct and print which thread handles each iteration.
 * (Comments translated from Italian.) */
int main()
{
int n_threads, id_thread, i; // n_threads is only referenced by the commented-out example below
/* Alternative form kept for reference:
omp_set_num_threads(5); // set the number of threads.
#pragma omp parallel private (id_thread) // the threads concurrently execute the code inside these braces
{
id_thread = omp_get_thread_num();
printf ("Sono: %d \n", id_thread);
#pragma omp for // the threads organize and divide the loop iterations among themselves.
for (i = 0; i <= 4; i++)
{
printf ("Iterazione %d del thread %d. \n", i, id_thread);
}
}*/
// The Parallel and For constructs can also be combined. The number of threads can be set in the "num_threads" clause.
#pragma omp parallel for private(id_thread) num_threads(5)
for(i = 0; i < 5; i++)
{
id_thread = omp_get_thread_num();
printf ("Iterazione %d del thread %d. \n", i, id_thread);
}
return 0;
}
|
gsrb.ca.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
//#define GSRB_STRIDE2
//#define GSRB_FP
//------------------------------------------------------------------------------------------------------------------------------
// This implements a communication avoiding (aggregation) smoother
// It assumes...
// in-place updates (no ping pong)
// stencil radius==1
//------------------------------------------------------------------------------------------------------------------------------
// Perform NUM_SMOOTHS Gauss-Seidel Red-Black (GSRB) smooths of vector phi_id
// against rhs_id on this level. When box_ghosts exceeds the stencil radius,
// several sub-sweeps are aggregated per boundary exchange (communication
// avoiding): each sub-sweep shrinks the valid ghost region by one cell.
// NOTE(review): parameters a and b are not referenced directly in this body;
// presumably they are consumed by the apply_op_ijk macro — confirm there.
void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){
int box,s;
int ghosts = level->box_ghosts;
int communicationAvoiding = ghosts > stencil_get_radius();
if(stencil_get_radius()>1){fprintf(stderr,"CA GSRB requires a stencil radius of 1\n");exit(0);}
// if communication-avoiding, need updated RHS for stencils in ghost zones
if(communicationAvoiding)exchange_boundary(level,rhs_id,STENCIL_SHAPE_BOX);
for(s=0;s<2*NUM_SMOOTHS;s+=ghosts){ // there are two sweeps per GSRB smooth
// refresh ghost zones (deep exchange when communication-avoiding) and re-apply BCs
exchange_boundary(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape());
apply_BCs(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape());
// now do ghosts communication-avoiding smooths on each box...
uint64_t _timeStart = CycleTime();
for(box=0;box<level->num_my_boxes;box++){
int i,j,k,ss;
int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k)&1; // is element 000 red or black ??? (should only be an issue if box dimension is odd)
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int dim = level->my_boxes[box].dim;
const double h2inv = 1.0/(level->h*level->h); // NOTE(review): not used directly here; presumably referenced by apply_op_ijk
// all vector pointers are shifted so that [0] addresses the first non-ghost-zone point
const double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
double * __restrict__ phi_new = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride);
const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
const double * __restrict__ RedBlack[2] = {level->RedBlack_FP[0] + ghosts*(1+jStride),
level->RedBlack_FP[1] + ghosts*(1+jStride)};
// each of the 'ghosts' sub-sweeps operates on one fewer layer of ghost cells
int ghostsToOperateOn=ghosts-1;
for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
#if defined(GSRB_FP)
#warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization...
#pragma omp parallel for private(i,j,k) collapse(2)
for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
int EvenOdd = (k^ss^color000)&1;
int ij = i + j*jStride;
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(phi);
phi_new[ijk] = phi[ijk] + RedBlack[EvenOdd][ij]*Dinv[ijk]*(rhs[ijk]-Ax); // compiler seems to get confused unless there are disjoint read/write pointers
}}}
#elif defined(GSRB_STRIDE2)
#warning GSRB using stride-2 accesses to minimie the number of flop's
#pragma omp parallel for private(i,j,k) collapse(2)
for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
for(i=((j^k^ss^color000)&1)+1-ghosts;i<dim+ghostsToOperateOn;i+=2){ // stride-2 GSRB
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(phi);
phi_new[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
}}}
#else
#warning GSRB using if-then-else on loop indices for Red-Black because its easy to read...
#pragma omp parallel for private(i,j,k) collapse(2)
for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
if((i^j^k^ss^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(phi);
phi_new[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
}}}}
#endif
} // ss-loop
} // boxes
level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
HybridRealAdoptor.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
//////////////////////////////////////////////////////////////////////////////////////
/** @file HybridRealAdoptor.h
*
* Adoptor classes to handle real hybrid orbitals with arbitrary precision
*/
#ifndef QMCPLUSPLUS_HYBRID_REAL_SOA_ADOPTOR_H
#define QMCPLUSPLUS_HYBRID_REAL_SOA_ADOPTOR_H
#include <QMCWaveFunctions/BsplineFactory/HybridAdoptorBase.h>
namespace qmcplusplus
{
/** adoptor class to match
*
*/
/** Adoptor combining a B-spline base adoptor with atomic-region (hybrid)
 *  orbital evaluation for real-valued orbitals.
 *
 *  Convention used throughout (established by the branches below):
 *    smooth_factor < 0  : point is outside all atomic regions -> pure spline path
 *    smooth_factor == 1 : point is deep inside an atomic region -> use myV as-is
 *    otherwise          : blend spline and atomic values via interpolate_buffer_*
 */
template<typename BaseAdoptor>
struct HybridRealSoA : public BaseAdoptor, public HybridAdoptorBase<typename BaseAdoptor::DataType>
{
using HybridBase = HybridAdoptorBase<typename BaseAdoptor::DataType>;
using ST = typename BaseAdoptor::DataType;
using PointType = typename BaseAdoptor::PointType;
using SingleSplineType = typename BaseAdoptor::SingleSplineType;
using RealType = typename SPOSet::RealType;
using ValueType = typename SPOSet::ValueType;
// scratch buffers holding the atomic-orbital contribution before blending
typename OrbitalSetTraits<ValueType>::ValueVector_t psi_AO, d2psi_AO;
typename OrbitalSetTraits<ValueType>::GradVector_t dpsi_AO;
// per-particle value rows used by the batched evaluateDetRatios path
Matrix<ST, aligned_allocator<ST>> multi_myV;
using BaseAdoptor::HalfG;
using BaseAdoptor::myG;
using BaseAdoptor::myH;
using BaseAdoptor::myL;
using BaseAdoptor::myV;
using BaseAdoptor::PrimLattice;
using HybridBase::d2f_dr2;
using HybridBase::df_dr;
using HybridBase::dist_dr;
using HybridBase::dist_r;
// Prefix the base adoptor's identifiers so the hybrid variant is distinguishable.
HybridRealSoA() : BaseAdoptor()
{
this->AdoptorName = "Hybrid" + this->AdoptorName;
this->KeyWord = "Hybrid" + this->KeyWord;
}
// Resize both the spline storage and the atomic-region storage.
inline void resizeStorage(size_t n, size_t nvals)
{
BaseAdoptor::resizeStorage(n, nvals);
HybridBase::resizeStorage(myV.size());
}
void bcast_tables(Communicate* comm)
{
BaseAdoptor::bcast_tables(comm);
HybridBase::bcast_tables(comm);
}
void gather_tables(Communicate* comm)
{
BaseAdoptor::gather_tables(comm);
HybridBase::gather_atomic_tables(comm, BaseAdoptor::offset);
}
inline void flush_zero()
{
//BaseAdoptor::flush_zero();
HybridBase::flush_zero();
}
// Both the atomic and spline tables must load/store successfully.
bool read_splines(hdf_archive& h5f) { return HybridBase::read_splines(h5f) && BaseAdoptor::read_splines(h5f); }
bool write_splines(hdf_archive& h5f) { return HybridBase::write_splines(h5f) && BaseAdoptor::write_splines(h5f); }
/** Evaluate orbital values at particle iat, blending spline and atomic
 *  contributions according to smooth_factor (see class comment). */
template<typename VV>
inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi)
{
const RealType smooth_factor = HybridBase::evaluate_v(P, iat, myV);
const RealType cone(1);
if (smooth_factor < 0)
{
// outside atomic regions: plain spline evaluation
BaseAdoptor::evaluate_v(P, iat, psi);
}
else if (smooth_factor == cone)
{
// fully inside an atomic region: myV already holds the values
const PointType& r = P.activeR(iat);
int bc_sign = HybridBase::get_bc_sign(r, PrimLattice, HalfG);
BaseAdoptor::assign_v(bc_sign, myV, psi, 0, myV.size());
}
else
{
// transition region: compute both and interpolate
const PointType& r = P.activeR(iat);
psi_AO.resize(psi.size());
int bc_sign = HybridBase::get_bc_sign(r, PrimLattice, HalfG);
BaseAdoptor::assign_v(bc_sign, myV, psi_AO, 0, myV.size());
BaseAdoptor::evaluate_v(P, iat, psi);
HybridBase::interpolate_buffer_v(psi, psi_AO);
}
}
/** Compute determinant ratios for all virtual particles; uses a batched
 *  atomic evaluation when the virtual moves lie on a sphere, otherwise
 *  falls back to per-particle evaluate_v. */
template<typename VV, typename RT>
inline void evaluateDetRatios(const VirtualParticleSet& VP, VV& psi, const VV& psiinv, std::vector<RT>& ratios)
{
if (VP.isOnSphere() && HybridBase::is_batched_safe(VP))
{
// resize scratch space
psi_AO.resize(psi.size());
if (multi_myV.rows() < VP.getTotalNum())
multi_myV.resize(VP.getTotalNum(), myV.size());
std::vector<int> bc_signs(VP.getTotalNum());
const RealType smooth_factor = HybridBase::evaluateValuesR2R(VP, PrimLattice, HalfG, multi_myV, bc_signs);
const RealType cone(1);
for (int iat = 0; iat < VP.getTotalNum(); ++iat)
{
if (smooth_factor < 0)
BaseAdoptor::evaluate_v(VP, iat, psi);
else if (smooth_factor == cone)
{
const PointType& r = VP.R[iat];
Vector<ST, aligned_allocator<ST>> myV_one(multi_myV[iat], myV.size());
BaseAdoptor::assign_v(bc_signs[iat], myV_one, psi, 0, myV.size());
}
else
{
Vector<ST, aligned_allocator<ST>> myV_one(multi_myV[iat], myV.size());
BaseAdoptor::assign_v(bc_signs[iat], myV_one, psi_AO, 0, myV.size());
BaseAdoptor::evaluate_v(VP, iat, psi);
HybridBase::interpolate_buffer_v(psi, psi_AO);
}
ratios[iat] = simd::dot(psi.data(), psiinv.data(), psi.size());
}
}
else
{
for (int iat = 0; iat < VP.getTotalNum(); ++iat)
{
evaluate_v(VP, iat, psi);
ratios[iat] = simd::dot(psi.data(), psiinv.data(), psi.size());
}
}
}
/** Evaluate value, gradient and Laplacian at particle iat, blending spline
 *  and atomic contributions according to smooth_factor. */
template<typename VV, typename GV>
inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi)
{
const RealType smooth_factor = HybridBase::evaluate_vgl(P, iat, myV, myG, myL);
const RealType cone(1);
if (smooth_factor < 0)
{
BaseAdoptor::evaluate_vgl(P, iat, psi, dpsi, d2psi);
}
else if (smooth_factor == cone)
{
const PointType& r = P.activeR(iat);
int bc_sign = HybridBase::get_bc_sign(r, PrimLattice, HalfG);
BaseAdoptor::assign_vgl_from_l(bc_sign, psi, dpsi, d2psi);
}
else
{
const PointType& r = P.activeR(iat);
psi_AO.resize(psi.size());
dpsi_AO.resize(psi.size());
d2psi_AO.resize(psi.size());
int bc_sign = HybridBase::get_bc_sign(r, PrimLattice, HalfG);
BaseAdoptor::assign_vgl_from_l(bc_sign, psi_AO, dpsi_AO, d2psi_AO);
BaseAdoptor::evaluate_vgl(P, iat, psi, dpsi, d2psi);
HybridBase::interpolate_buffer_vgl(psi, dpsi, d2psi, psi_AO, dpsi_AO, d2psi_AO);
}
}
// Multi-walker wrapper: evaluate_vgl per walker, parallelized over walkers.
template<typename VV, typename GV>
inline void mw_evaluate_vgl(const std::vector<HybridRealSoA*>& sa_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<VV*>& psi_v_list,
const std::vector<GV*>& dpsi_v_list,
const std::vector<VV*>& d2psi_v_list)
{
#pragma omp parallel for
for (int iw = 0; iw < sa_list.size(); iw++)
sa_list[iw]->evaluate_vgl(*P_list[iw], iat, *psi_v_list[iw], *dpsi_v_list[iw], *d2psi_v_list[iw]);
}
// Value/gradient/Hessian evaluation: deliberately aborts — the hybrid path
// is not implemented (the code after APP_ABORT is unreachable at runtime).
template<typename VV, typename GV, typename GGV>
inline void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi)
{
APP_ABORT("HybridRealSoA::evaluate_vgh not implemented!");
if (HybridBase::evaluate_vgh(P, iat, myV, myG, myH))
{
const PointType& r = P.activeR(iat);
int bc_sign = HybridBase::get_bc_sign(r, PrimLattice, HalfG);
BaseAdoptor::assign_vgh(bc_sign, psi, dpsi, grad_grad_psi, 0, myV.size());
}
else
BaseAdoptor::evaluate_vgh(P, iat, psi, dpsi, grad_grad_psi);
}
};
} // namespace qmcplusplus
#endif
|
vlisa_generic.c | /*
** Implementation of LISA algorithm
** for statistical inference of fMRI images
**
** generic plug-in.
** A file containing a list of all 3D permutation images must be supplied.
**
** G.Lohmann, April 2017
*/
#include <viaio/Vlib.h>
#include <viaio/file.h>
#include <viaio/mu.h>
#include <viaio/option.h>
#include <viaio/os.h>
#include <viaio/VImage.h>
#include <via/via.h>
#include <gsl/gsl_cdf.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_histogram.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif /*_OPENMP*/
#define ABS(x) ((x) > 0 ? (x) : -(x))
extern void VIsolatedVoxels(VImage src,float threshold);
extern void VHistogram(gsl_histogram *histogram,VString filename);
extern void VCheckImage(VImage src);
extern void FDR(VImage src,VImage dest,gsl_histogram *nullhist,gsl_histogram *realhist,double alpha);
extern void ImageStats(VImage src,double *,double *,double *hmin,double *hmax);
extern void VBilateralFilter(VImage src,VImage dest,int radius,double var1,double var2,int);
extern double VImageVar(VImage src);
extern void VImageCount(VImage src);
extern void VGetHistRange(VImage src,double *hmin,double *hmax);
extern float VGetMode(VImage src);
extern void VZScale(VImage src,float mode,float stddev);
extern void HistoUpdate(VImage,gsl_histogram *);
/* make sure all input images are in float and have the same number of pixels */
/* Verify that every permutation image is in float representation and has the
 * same number of pixels as the input zmap; abort via VError otherwise. */
void CheckImageTypes(VImage zmap,VImage *permimages,int numperm)
{
size_t npixels = VImageNPixels(zmap);
int i;
for (i=0; i<numperm; i++) {
if (npixels != VImageNPixels(permimages[i]))
VError(" inconsistent number of pixels in permutation image %d",i);
/* bug fix: the "%d" below previously had no matching argument, which is
 * undefined behavior for a printf-style formatter */
if (VPixelRepn(permimages[i]) != VFloatRepn)
VError(" perm image %d is not in float repn",i);
}
}
/* LISA statistical-inference pipeline (generic plug-in):
 * 1) read the input zmap and the list of permutation images,
 * 2) estimate the null standard deviation from up to 30 permutations,
 * 3) scale + bilateral-filter the real map and every permutation map,
 * 4) accumulate real/null histograms and apply FDR thresholding,
 * 5) optionally remove isolated voxels and write the result image. */
int main (int argc, char *argv[])
{
/* command-line parameters (static so the options[] table can point at them) */
static VString filename = "";
static VFloat alpha = 0.05;
static VShort radius = 2;
static VFloat rvar = 2.0;
static VFloat svar = 2.0;
static VShort numiter = 2;
static VBoolean centering = FALSE; /* NOTE(review): no entry in options[] sets this, so it stays FALSE — confirm whether a "-centering" option was intended */
static VBoolean cleanup = TRUE;
static VShort nproc = 0;
static VOptionDescRec options[] = {
{"permutations",VStringRepn,1,(VPointer) &filename,VRequiredOpt,NULL,"List of all permutation images"},
{"alpha",VFloatRepn,1,(VPointer) &alpha,VOptionalOpt,NULL,"FDR significance level"},
{"radius",VShortRepn,1,(VPointer) &radius,VOptionalOpt,NULL,"Bilateral parameter (radius in voxels)"},
{"rvar",VFloatRepn,1,(VPointer) &rvar,VOptionalOpt,NULL,"Bilateral parameter (radiometric)"},
{"svar",VFloatRepn,1,(VPointer) &svar,VOptionalOpt,NULL,"Bilateral parameter (spatial)"},
{"filteriterations",VShortRepn,1,(VPointer) &numiter,VOptionalOpt,NULL,"Bilateral parameter (number of iterations)"},
{"cleanup",VBooleanRepn,1,(VPointer) &cleanup,VOptionalOpt,NULL,"Whether to remove isolated voxels"},
{"j",VShortRepn,1,(VPointer) &nproc,VOptionalOpt,NULL,"Number of processors to use, '0' to use all"},
};
FILE *out_file=NULL,*in_file=NULL;
VString in_filename=NULL;
char *prg_name=GetLipsiaName("vlisa_generic");
fprintf (stderr, "%s\n", prg_name);
/* disable GSL's abort-on-error default; errors are handled locally */
gsl_set_error_handler_off ();
/* Parse command line arguments and identify files: */
VParseFilterCmdZ (VNumber (options), options, argc, argv,&in_file,&out_file,&in_filename);
/* Read the input zmap file: */
VAttrList list = VReadAttrListZ(in_file,in_filename,0L,TRUE,FALSE);
VImage zmap1 = VReadImage(list);
if (zmap1 == NULL) VError(" no input zmap image found");
if (VPixelRepn(zmap1) != VFloatRepn) VError(" input pixel repn must be float");
VAttrList geolist = VGetGeoInfo(list);
/* Read permutation file containing a list of 3D images */
VAttrList listperm = VReadAttrList(filename,0L,FALSE,FALSE);
int numperm = VAttrListNumImages(listperm);
VImage *zmap = VAttrListGetImages(listperm,numperm);
CheckImageTypes(zmap1,zmap,numperm);
fprintf(stderr," Number of permutation images: %d\n",(int)numperm);
/* estimate null variance to adjust radiometric parameter, use first 30 permutations */
double zvar=0,hmin=0,hmax=0;
float stddev=1.0;
int nperm=0;
if (numperm > 0) {
int tstperm = 30;
if (tstperm > numperm) tstperm = numperm;
double varsum=0,nx=0;
for (nperm = 0; nperm < tstperm; nperm++) {
zvar = VImageVar(zmap[nperm]);
varsum += zvar;
nx++;
}
double meanvar = varsum/nx;
stddev = sqrt(meanvar);
}
/* get non-permuted hotspot map */
float mode=0;
if (centering) mode = VGetMode(zmap1);
if (numperm > 0) VZScale(zmap1,mode,stddev);
VImage dst1 = VCreateImageLike(zmap1);
VBilateralFilter(zmap1,dst1,(int)radius,(double)rvar,(double)svar,(int)numiter);
/* ini histograms: histz holds the real map, hist0 accumulates the null maps */
VGetHistRange(dst1,&hmin,&hmax);
size_t nbins = 20000;
gsl_histogram *hist0 = gsl_histogram_alloc (nbins);
gsl_histogram_set_ranges_uniform (hist0,hmin,hmax);
gsl_histogram *histz = gsl_histogram_alloc (nbins);
gsl_histogram_set_ranges_uniform (histz,hmin,hmax);
HistoUpdate(dst1,histz);
/* omp-stuff */
#ifdef _OPENMP
int num_procs=omp_get_num_procs();
if (nproc > 0 && nproc < num_procs) num_procs = nproc;
fprintf(stderr," using %d cores\n",(int)num_procs);
omp_set_num_threads(num_procs);
#endif /* _OPENMP */
/* do random permutations: each permutation map is scaled, filtered, and
 * folded into the null histogram (histogram update serialized below) */
#pragma omp parallel for shared(zmap) schedule(dynamic)
for (nperm = 0; nperm < numperm; nperm++) {
if (nperm%20 == 0) fprintf(stderr," perm %4d of %d\r",nperm,(int)numperm);
float mode=0;
if (centering) mode = VGetMode(zmap[nperm]);
VZScale(zmap[nperm],mode,stddev);
VImage dst = VCreateImageLike (zmap1);
VBilateralFilter(zmap[nperm],dst,(int)radius,(double)rvar,(double)svar,(int)numiter);
#pragma omp critical
{
HistoUpdate(dst,hist0);
}
VDestroyImage(dst);
}
/* apply fdr */
VImage fdrimage = VCopyImage (dst1,NULL,VAllBands);
if (numperm > 0) {
FDR(dst1,fdrimage,hist0,histz,(double)alpha);
if (cleanup && alpha < 1.0) {
VIsolatedVoxels(fdrimage,(float)(1.0-alpha));
}
}
/* output */
VAttrList out_list = VCreateAttrList ();
VHistory(VNumber(options),options,prg_name,&list,&out_list);
VSetGeoInfo(geolist,out_list);
VAppendAttr (out_list,"image",NULL,VImageRepn,fdrimage);
if (! VWriteFile (out_file, out_list)) exit (1);
fprintf (stderr, "\n%s: done.\n", argv[0]);
exit(0);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store x - y into *result as a struct timeval.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is normalized in place (classic GNU libc idiom) so that
 * 0 <= x->tv_usec - y->tv_usec < 1000000 before the subtraction. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds from y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= 1000000 * carry;
    }
    /* Push excess microseconds in the difference back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* With y normalized, tv_usec of the difference is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-4 (25-point) 3D stencil: allocates the two
 * time planes A[0]/A[1] plus the coefficient field roc2, runs TESTS timed
 * sweeps of Nt time steps, reports the best time, and frees everything. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* Default problem size (+8 accounts for the 4-deep halo on each side).
     * Bug fix: Nx..Nt were previously left uninitialized when too few
     * command-line arguments were supplied (undefined behavior). */
    Nx = Ny = Nz = 32 + 8;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* Allocate the arrays. Bug fix: a stray malloc(sizeof(double**)) was
     * previously assigned to roc2 and immediately overwritten (memory leak). */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2;
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 128;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    /* Initialize every element. Bug fix: the loops previously started at
     * index 1 and A[1] was never initialized, so the stencil (which reads
     * indices 0..3 through the halo and reads A[1] on the first step) was
     * consuming uninitialized memory. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 25-point stencil coefficients (center + 4 shells along each axis) */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    return 0;
}
|
GB_unaryop__ainv_uint64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_fp32
// op(A') function: GB_tran__ainv_uint64_fp32
// C type: uint64_t
// A type: float
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: for each of the anz entries,
// Cx [p] = -(uint64_t cast of Ax [p]) via the GB_CAST_OP macro above.
// Returns GrB_NO_VALUE when this kernel is compiled out by GB_DISABLE.
GrB_Info GB_unop__ainv_uint64_fp32
(
uint64_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// embarrassingly parallel: each entry is independent
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while typecasting and applying the unary operator; the loop
// body is supplied by the included template GB_unaryop_transpose.c, which
// expands the GB_* macros defined above for this type combination.
GrB_Info GB_tran__ainv_uint64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_indirectIndex.c | // A loop with array references using indirect indexing
//
// Conventional parallelization algorithms will not parallelize the loop
// since indirect indexing may result in overlapped elements being accessed,
// which in turn introduces loop carried dependencies.
//
// However, if users can provide semantics that the indirect indexing will
// not result in overlapping elements (or unique elements), the loop can be parallelized.
//
// This is a simplified version based on code examples provided by Jeff Keasler.
//
// Liao, 5/12/2009
#define length 100
#include <omp.h>
double eps[100];
int zoneset[100];
// Write into eps[] at indirectly-indexed locations. The parallelization is
// legal only under the user-supplied assumption that zoneset[] holds unique
// indices (no two iterations touch the same eps element).
void StressCheckEpsFail(double eps_failure_model)
{
int i;
int index; // NOTE(review): assigned below but never read afterwards
#pragma omp parallel for private (index,i) firstprivate (eps_failure_model)
for (i = 0; i <= 99; i += 1) {
index = zoneset[i];
eps[zoneset[i]] = eps_failure_model * 1.01; // NOTE(review): dead store — overwritten by the next line
eps[zoneset[i]] = 1.01;
}
}
// a multi level definition chain
// Same pattern as StressCheckEpsFail, but with a two-level definition chain
// (index1 -> index2) to exercise the analysis of chained index definitions.
void StressCheckEpsFaili2(double eps_failure_model)
{
int i;
int index1;
#pragma omp parallel for private (index1,i) firstprivate (eps_failure_model)
for (i = 0; i <= 99; i += 1) {
index1 = zoneset[i];
int index2 = index1; // NOTE(review): index2 is never read — it only extends the definition chain
eps[zoneset[i]] = eps_failure_model * 1.01; // NOTE(review): dead store — overwritten by the next line
eps[zoneset[i]] = 1.01;
}
}
// a multi dimensional case
// Multi-dimensional indirect-indexing example: only the inner (j) loop is
// parallelized because the outer loop carries a dependence through b[i-1].
void foo()
{
int n = 100;
int m = 100;
double b[n][m]; // VLA; contents are never initialized before being read below
int i;
int j;
int index;
int zoneset[m]; // NOTE(review): shadows the global zoneset and is uninitialized — the index values used below are indeterminate
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (index,j)
for (j = 0; j <= m - 1; j += 1) {
index = zoneset[j];
b[i][zoneset[j]] = b[i - 1][index - 1]; // NOTE(review): reads b[i-1] when i==0 (out of bounds), and index-1 may be negative — must be fixed before real use
}
}
}
|
LISAgeometry.c | /**
* \author Sylvain Marsat, University of Maryland - NASA GSFC
*
* \brief C code for the geometric coefficients entering the response for LISA-like detectors.
*
*/
#define _XOPEN_SOURCE 500
#ifdef __GNUC__
#define UNUSED __attribute__ ((unused))
#else
#define UNUSED
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <time.h>
#include <unistd.h>
#include <getopt.h>
#include <stdbool.h>
#include <string.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_bspline.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_min.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_complex.h>
#include "constants.h"
#include "waveform.h"
#include "LISAgeometry.h"
#include <time.h> /* for testing */
//Named LISA-like constellation struct examples
/*
struct tagLISAconstellation {
double OrbitOmega,OrbitPhi0,OrbitR;
double ConstOmega,ConstPhi0,ConstL;
}
*/
/* Initializer order follows the tagLISAconstellation comment above:
 * OrbitOmega, OrbitPhi0, OrbitR, ConstOmega, ConstPhi0, ConstL, then a final
 * noise member (not shown in that comment — presumably the noise model; see
 * the struct definition in LISAgeometry.h). */
/* Proposal-era configuration: 1 AU orbit, ConstL = 2.5e9 */
LISAconstellation LISAProposal = {
EarthOrbitOmega_SI,
0,
AU_SI,
EarthOrbitOmega_SI,
0,
2.5e9,
LISAProposalnoise
};
/* 2017 configuration: same geometry as LISAProposal, 2017 noise model */
LISAconstellation LISA2017 = {
EarthOrbitOmega_SI,
0,
AU_SI,
EarthOrbitOmega_SI,
0,
2.5e9,
LISA2017noise
};
/* 2010 configuration: ConstL = 5e9, 2010 noise model */
LISAconstellation LISA2010 = {
EarthOrbitOmega_SI,
0,
AU_SI,
EarthOrbitOmega_SI,
0,
5e9,
LISA2010noise
};
/* Test variant: both angular frequencies slowed by a factor of 100 */
LISAconstellation slowOrbitLISA = {
EarthOrbitOmega_SI/100.0,
0,
AU_SI,
EarthOrbitOmega_SI/100.0,
0,
2.5e9,
LISA2017noise
};
/* Test variant: orbital radius reduced by a factor of 100 */
LISAconstellation tinyOrbitLISA = {
EarthOrbitOmega_SI,
0,
AU_SI/100,
EarthOrbitOmega_SI,
0,
2.5e9,
LISA2017noise
};
/* Test variant: both angular frequencies increased tenfold */
LISAconstellation fastOrbitLISA = {
EarthOrbitOmega_SI*10.0,
0,
AU_SI,
EarthOrbitOmega_SI*10.0,
0,
2.5e9,
LISA2017noise
};
/* Test variant: both angular frequencies reduced tenfold */
LISAconstellation bigOrbitLISA = {
EarthOrbitOmega_SI/10.0,
0,
AU_SI,
EarthOrbitOmega_SI/10.0,
0,
2.5e9,
LISA2017noise
};
/****************************************************************/
/********* Coefficients for the geometric response **************/
/* External storage for cos, sin and coefficients */
static double coeffn1Hn1crossconst, coeffn1Hn1plusconst, coeffn2Hn2crossconst, coeffn2Hn2plusconst, coeffn3Hn3crossconst, coeffn3Hn3plusconst;
static double coeffn1Hn1pluscos[4];
static double coeffn1Hn1plussin[4];
static double coeffn2Hn2pluscos[4];
static double coeffn2Hn2plussin[4];
static double coeffn3Hn3pluscos[4];
static double coeffn3Hn3plussin[4];
static double coeffn1Hn1crosscos[4];
/* Time-independent trigonometric coefficient caches for the geometric response.
 * SetCoeffsG() (below) fills them for a given sky position and polarization.
 * The [4]-element coeffn*Hn* arrays and [2]-element coeffk* arrays presumably
 * hold cos/sin expansion coefficients in the orbital phase -- TODO confirm
 * against the full body of SetCoeffsG.
 * NOTE(review): the scalar coeffn*Hn*const variables named in the first
 * threadprivate pragma are declared above this excerpt. */
static double coeffn1Hn1crosssin[4];
static double coeffn2Hn2crosscos[4];
static double coeffn2Hn2crosssin[4];
static double coeffn3Hn3crosscos[4];
static double coeffn3Hn3crosssin[4];
static double coeffkn1const, coeffkn2const, coeffkn3const, coeffkp1plusp2const, coeffkp2plusp3const, coeffkp3plusp1const, coeffkp1const, coeffkp2const, coeffkp3const, coeffkRconst;
static double coeffkn1cos[2];
static double coeffkn1sin[2];
static double coeffkn2cos[2];
static double coeffkn2sin[2];
static double coeffkn3cos[2];
static double coeffkn3sin[2];
static double coeffkp1plusp2cos[2];
static double coeffkp1plusp2sin[2];
static double coeffkp2plusp3cos[2];
static double coeffkp2plusp3sin[2];
static double coeffkp3plusp1cos[2];
static double coeffkp3plusp1sin[2];
static double coeffkp1cos[2];
static double coeffkp1sin[2];
static double coeffkp2cos[2];
static double coeffkp2sin[2];
static double coeffkp3cos[2];
static double coeffkp3sin[2];
static double coeffkRcos[2];
static double coeffkRsin[2];
/* Scratch arrays of precomputed cos/sin values. */
static double cosarray[4];
static double sinarray[4];
/* Under OpenMP, every thread gets its own private copy of all coefficient
 * caches above, so concurrent use of the response for different sky positions
 * does not race on this global mutable state. */
#pragma omp threadprivate(coeffn1Hn1crossconst, coeffn1Hn1plusconst, coeffn2Hn2crossconst, coeffn2Hn2plusconst, coeffn3Hn3crossconst, coeffn3Hn3plusconst)
#pragma omp threadprivate(coeffn1Hn1pluscos,coeffn1Hn1plussin,coeffn2Hn2pluscos,coeffn2Hn2plussin,coeffn3Hn3pluscos,coeffn3Hn3plussin)
#pragma omp threadprivate(coeffn1Hn1crosscos,coeffn1Hn1crosssin,coeffn2Hn2crosscos,coeffn2Hn2crosssin,coeffn3Hn3crosscos,coeffn3Hn3crosssin)
#pragma omp threadprivate(coeffkn1const, coeffkn2const, coeffkn3const, coeffkp1plusp2const, coeffkp2plusp3const, coeffkp3plusp1const, coeffkp1const, coeffkp2const, coeffkp3const, coeffkRconst)
#pragma omp threadprivate(coeffkn1cos,coeffkn1sin,coeffkn2cos,coeffkn2sin,coeffkn3cos,coeffkn3sin)
#pragma omp threadprivate(coeffkp1cos,coeffkp1sin,coeffkp2cos,coeffkp2sin,coeffkp3cos,coeffkp3sin)
#pragma omp threadprivate(coeffkp1plusp2cos,coeffkp1plusp2sin,coeffkp2plusp3cos,coeffkp2plusp3sin,coeffkp3plusp1cos,coeffkp3plusp1sin)
#pragma omp threadprivate(coeffkRcos,coeffkRsin,cosarray,sinarray)
/*************************************************************/
/********* Functions for the geometric response **************/
/* Function to convert an input TDI string to the corresponding TDItag */
/* Convert a TDI observable name string to the corresponding TDItag.
 * Fix: the unrecognized-string diagnostic now goes to stderr (it previously
 * went to stdout via printf), and the repetitive strcmp chain is replaced by
 * a lookup table.  Exits with status 1 on an unrecognized string, as before. */
TDItag ParseTDItag(char* string) {
  /* Name -> tag lookup table; extend here when new TDI observables are added. */
  static const struct { const char *name; TDItag tag; } table[] = {
    {"delayO", delayO},
    {"y12L", y12L},
    {"y12", y12},
    {"TDIXYZ", TDIXYZ},
    {"TDIalphabetagamma", TDIalphabetagamma},
    {"TDIAETXYZ", TDIAETXYZ},
    {"TDIAETalphabetagamma", TDIAETalphabetagamma},
    {"TDIX", TDIX},
    {"TDIalpha", TDIalpha},
    {"TDIAXYZ", TDIAXYZ},
    {"TDIEXYZ", TDIEXYZ},
    {"TDITXYZ", TDITXYZ},
    {"TDIAalphabetagamma", TDIAalphabetagamma},
    {"TDIEalphabetagamma", TDIEalphabetagamma},
    {"TDITalphabetagamma", TDITalphabetagamma},
  };
  for (size_t i = 0; i < sizeof table / sizeof table[0]; i++) {
    if (strcmp(string, table[i].name) == 0) return table[i].tag;
  }
  fprintf(stderr, "Error in ParseTDItag: string not recognized.\n");
  exit(1);
}
/* Function to convert string input ResponseApprox to tag */
/* Convert a response-approximation name string to the corresponding
 * ResponseApproxtag ("full", "lowfL" or "lowf").
 * Fix: the unrecognized-string diagnostic now goes to stderr (it previously
 * went to stdout via printf).  Exits with status 1 on an unrecognized string. */
ResponseApproxtag ParseResponseApproxtag(char* string) {
  static const struct { const char *name; ResponseApproxtag tag; } table[] = {
    {"full", full},
    {"lowfL", lowfL},
    {"lowf", lowf},
  };
  for (size_t i = 0; i < sizeof table / sizeof table[0]; i++) {
    if (strcmp(string, table[i].name) == 0) return table[i].tag;
  }
  fprintf(stderr, "Error in ParseResponseApproxtag: string not recognized.\n");
  exit(1);
}
/* Compute Solar System Barycenter time tSSB from retarded time at the center of the LISA constellation tL */
/* NOTE: depends on the sky position given in SSB parameters */
/* Solar System Barycenter time tSSB corresponding to a retarded time tL at the
 * center of the LISA constellation, for a source at SSB sky position
 * (lambdaSSB, betaSSB).  First-order Roemer delay across the orbital radius,
 * plus a second-order correction in the orbital delay. */
double tSSBfromLframe(const LISAconstellation *variant, const double tL, const double lambdaSSB, const double betaSSB) {
  const double orbitDelay = variant->OrbitR/C_SI;      /* light travel time over the orbital radius */
  const double orbitPhase = variant->ConstOmega*tL + variant->ConstPhi0 - lambdaSSB;
  const double projDelay = orbitDelay*cos(betaSSB);    /* delay projected on the source ecliptic latitude */
  return tL + projDelay*cos(orbitPhase) - 1./2*variant->ConstOmega*pow(projDelay, 2)*sin(2.*orbitPhase);
}
/* Compute retarded time at the center of the LISA constellation tL from Solar System Barycenter time tSSB */
/* Retarded time tL at the center of the LISA constellation corresponding to a
 * Solar System Barycenter time tSSB, for a source at SSB sky position
 * (lambdaSSB, betaSSB).  Inverse of tSSBfromLframe to first order in the
 * Roemer delay. */
double tLfromSSBframe(const LISAconstellation *variant, const double tSSB, const double lambdaSSB, const double betaSSB) {
  const double orbitDelay = variant->OrbitR/C_SI;      /* light travel time over the orbital radius */
  const double orbitPhase = variant->ConstOmega*tSSB + variant->ConstPhi0 - lambdaSSB;
  return tSSB - orbitDelay*cos(betaSSB)*cos(orbitPhase);
}
/* Convert L-frame params to SSB-frame params */
/* NOTE: no transformation of the phase -- approximant-dependence with e.g. EOBNRv2HMROM setting phiRef at fRef, and freedom in definition */
/* Convert L-frame (LISA-based) parameters to SSB-frame parameters.
 * Outputs: tSSB, lambdaSSB, betaSSB (sky position), psiSSB (polarization).
 * Inputs:  tL (retarded time at the constellation center), L-frame sky
 *          position (lambdaL, betaL), polarization psiL, and the
 *          constellation variant (orbital radius, angular frequency, phase).
 * The sky position depends on the orbital phase alpha(tSSB), which itself
 * depends on the unknown tSSB; a short fixed-point iteration (3 passes,
 * starting from tSSB ~ tL) resolves the coupling.
 * NOTE: no transformation of the phase -- approximant-dependence with e.g.
 * EOBNRv2HMROM setting phiRef at fRef, and freedom in definition.
 * Fix: removed dead locals cospsiL/sinpsiL (computed but never used). */
int ConvertLframeParamsToSSBframe(
  double* tSSB,
  double* lambdaSSB,
  double* betaSSB,
  double* psiSSB,
  const double tL,
  const double lambdaL,
  const double betaL,
  const double psiL,
  const LISAconstellation *variant)
{
  double alpha = 0., cosalpha = 0., sinalpha = 0.;
  /* zeta = PI/3 is the inclination of the constellation plane to the ecliptic */
  double coszeta = cos(PI/3.);
  double sinzeta = sin(PI/3.);
  double coslambdaL = cos(lambdaL);
  double sinlambdaL = sin(lambdaL);
  double cosbetaL = cos(betaL);
  double sinbetaL = sin(betaL);
  double lambdaSSB_approx = 0.;
  double betaSSB_approx = 0.;
  /* Initially, approximate alpha using tL instead of tSSB - then iterate */
  double tSSB_approx = tL;
  for(int k=0; k<3; k++) {
    alpha = variant->ConstOmega * (tSSB_approx) + variant->ConstPhi0;
    cosalpha = cos(alpha);
    sinalpha = sin(alpha);
    lambdaSSB_approx = atan2(cosalpha*cosalpha*cosbetaL*sinlambdaL -sinalpha*sinbetaL*sinzeta + cosbetaL*coszeta*sinalpha*sinalpha*sinlambdaL -cosalpha*cosbetaL*coslambdaL*sinalpha + cosalpha*cosbetaL*coszeta*coslambdaL*sinalpha, cosbetaL*coslambdaL*sinalpha*sinalpha -cosalpha*sinbetaL*sinzeta + cosalpha*cosalpha*cosbetaL*coszeta*coslambdaL -cosalpha*cosbetaL*sinalpha*sinlambdaL + cosalpha*cosbetaL*coszeta*sinalpha*sinlambdaL);
    betaSSB_approx = asin(coszeta*sinbetaL + cosalpha*cosbetaL*coslambdaL*sinzeta + cosbetaL*sinalpha*sinzeta*sinlambdaL);
    tSSB_approx = tSSBfromLframe(variant, tL, lambdaSSB_approx, betaSSB_approx);
  }
  *tSSB = tSSB_approx;
  *lambdaSSB = lambdaSSB_approx;
  *betaSSB = betaSSB_approx;
  /* Polarization: rotate psiL by the frame-rotation angle, reduced mod pi */
  *psiSSB = modpi(psiL + atan2(cosalpha*sinzeta*sinlambdaL -coslambdaL*sinalpha*sinzeta, cosbetaL*coszeta -cosalpha*coslambdaL*sinbetaL*sinzeta -sinalpha*sinbetaL*sinzeta*sinlambdaL));
  return SUCCESS;
}
/* Convert SSB-frame params to L-frame params */
/* NOTE: no transformation of the phase -- approximant-dependence with e.g. EOBNRv2HMROM setting phiRef at fRef, and freedom in definition */
/* Convert SSB-frame parameters to L-frame (LISA-based) parameters.
 * Outputs: tL (retarded time at the constellation center), L-frame sky
 *          position (lambdaL, betaL), polarization psiL.
 * Inputs:  tSSB, SSB-frame sky position (lambdaSSB, betaSSB), polarization
 *          psiSSB, and the constellation variant.
 * Unlike the inverse conversion, no iteration is needed: the orbital phase
 * alpha is evaluated directly at the known tSSB.
 * NOTE: no transformation of the phase -- approximant-dependence with e.g.
 * EOBNRv2HMROM setting phiRef at fRef, and freedom in definition.
 * Fix: removed dead locals cospsi/sinpsi (computed but never used). */
int ConvertSSBframeParamsToLframe(
  double* tL,
  double* lambdaL,
  double* betaL,
  double* psiL,
  const double tSSB,
  const double lambdaSSB,
  const double betaSSB,
  const double psiSSB,
  const LISAconstellation *variant)
{
  double alpha = 0., cosalpha = 0., sinalpha = 0.;
  /* zeta = PI/3 is the inclination of the constellation plane to the ecliptic */
  double coszeta = cos(PI/3.);
  double sinzeta = sin(PI/3.);
  double coslambda = cos(lambdaSSB);
  double sinlambda = sin(lambdaSSB);
  double cosbeta = cos(betaSSB);
  double sinbeta = sin(betaSSB);
  alpha = variant->ConstOmega * (tSSB) + variant->ConstPhi0;
  cosalpha = cos(alpha);
  sinalpha = sin(alpha);
  *tL = tLfromSSBframe(variant, tSSB, lambdaSSB, betaSSB);
  *lambdaL = atan2(cosalpha*cosalpha*cosbeta*sinlambda + sinalpha*sinbeta*sinzeta + cosbeta*coszeta*sinalpha*sinalpha*sinlambda -cosalpha*cosbeta*coslambda*sinalpha + cosalpha*cosbeta*coszeta*coslambda*sinalpha, cosalpha*sinbeta*sinzeta + cosbeta*coslambda*sinalpha*sinalpha + cosalpha*cosalpha*cosbeta*coszeta*coslambda -cosalpha*cosbeta*sinalpha*sinlambda + cosalpha*cosbeta*coszeta*sinalpha*sinlambda);
  *betaL = asin(coszeta*sinbeta -cosalpha*cosbeta*coslambda*sinzeta -cosbeta*sinalpha*sinzeta*sinlambda);
  /* Polarization: rotate psiSSB by the frame-rotation angle, reduced mod pi */
  *psiL = modpi(psiSSB + atan2(coslambda*sinalpha*sinzeta -cosalpha*sinzeta*sinlambda, cosbeta*coszeta + cosalpha*coslambda*sinbeta*sinzeta + sinalpha*sinbeta*sinzeta*sinlambda));
  return SUCCESS;
}
/* Function cardinal sine */
double sinc(const double x) {
if (x==0)
return 1;
else return sin(x)/x;
}
/* Function to compute, given a value of a sky position and polarization, all the complicated time-independent trigonometric coefficients entering the response */
void SetCoeffsG(const double lambda, const double beta, const double psi) {
/* Fills the file-scope coefficient globals (coeffn3Hn3..., coeffn2Hn2..., coeffn1Hn1...,
 * coeffkn..., coeffkp..., coeffkR...) used by the GABmode response functions.
 * Each quantity X(t) is expanded in harmonics of the orbital phase alpha:
 *   X = Xconst + sum_j Xcos[j]*cos((j+1)*alpha) + Xsin[j]*sin((j+1)*alpha)
 * (4 harmonics for the n.H.n projections, 2 for the k.n / k.p scalar products).
 * The expressions below are machine-generated — do not edit by hand.
 * NOTE(review): writes shared globals, so not thread-safe — confirm single-threaded use. */
/* Precomputing cosines and sines */
double coslambda = cos(lambda);
double sinlambda = sin(lambda);
double cosbeta = cos(beta);
double sinbeta = sin(beta);
double cospsi = cos(psi);
double sinpsi = sin(psi);
/* Projection coefficients for hplus in n3.H.n3 */
/**/
coeffn3Hn3plusconst = 1./128 * (-4*cospsi*cospsi + 4*sinpsi*sinpsi -27*coslambda*coslambda*cospsi*cospsi -27*sinlambda*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*cospsi*cospsi -4*sinbeta*sinbeta*sinpsi*sinpsi + 4*cosbeta*cosbeta*sinpsi*sinpsi + 4*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*sinpsi*sinpsi + 27*cospsi*cospsi*sinlambda*sinlambda -9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -54*sqrt3*coslambda*sinlambda*sinpsi*sinpsi + 54*sqrt3*coslambda*cospsi*cospsi*sinlambda -144*coslambda*cospsi*sinbeta*sinlambda*sinpsi -72*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi -18*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 18*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 18*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 72*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi);
/**/
coeffn3Hn3pluscos[0] = 1./16*cosbeta * (-9*cospsi*cospsi*sinbeta*sinlambda + 9*sinbeta*sinlambda*sinpsi*sinpsi + 18*coslambda*cospsi*sinpsi -7*sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 7*sqrt3*coslambda*cospsi*cospsi*sinbeta + 14*sqrt3*cospsi*sinlambda*sinpsi);
/**/
coeffn3Hn3pluscos[1] = -3./64 * (-3*sinpsi*sinpsi + 3*cospsi*cospsi -6*coslambda*coslambda*cospsi*cospsi -6*sinlambda*sinlambda*sinpsi*sinpsi -3*cosbeta*cosbeta*sinpsi*sinpsi -3*cospsi*cospsi*sinbeta*sinbeta + 3*cosbeta*cosbeta*cospsi*cospsi + 3*sinbeta*sinbeta*sinpsi*sinpsi + 6*coslambda*coslambda*sinpsi*sinpsi + 6*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -2*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -2*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -2*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 2*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -32*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn3Hn3pluscos[2] = -1./16*cosbeta * (-6*coslambda*cospsi*sinpsi -3*sinbeta*sinlambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinbeta*sinlambda + sqrt3*coslambda*cospsi*cospsi*sinbeta -sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*sinlambda*sinpsi);
/**/
coeffn3Hn3pluscos[3] = 1./128 * (-3*coslambda*coslambda*cospsi*cospsi -3*sinlambda*sinlambda*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*cospsi*cospsi*sinlambda + 6*sqrt3*coslambda*sinlambda*sinpsi*sinpsi -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi -8*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 2*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn3Hn3plussin[0] = -1./16*cosbeta * (-9*coslambda*sinbeta*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi*sinbeta + 18*cospsi*sinlambda*sinpsi + sqrt3*sinbeta*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinlambda + 2*sqrt3*coslambda*cospsi*sinpsi);
/**/
coeffn3Hn3plussin[1] = 3./64 * (-3*sqrt3*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi -12*coslambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cosbeta*cosbeta*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinbeta*sinbeta + 3*sqrt3*cosbeta*cosbeta*cospsi*cospsi + 3*sqrt3*sinbeta*sinbeta*sinpsi*sinpsi + 12*coslambda*cospsi*cospsi*sinlambda -16*coslambda*coslambda*cospsi*sinbeta*sinpsi -4*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 4*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 4*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 16*cospsi*sinbeta*sinlambda*sinlambda*sinpsi);
/**/
coeffn3Hn3plussin[2] = 1./16*cosbeta * (-3*coslambda*sinbeta*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi*sinbeta + 6*cospsi*sinlambda*sinpsi + sqrt3*sinbeta*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinlambda + 2*sqrt3*coslambda*cospsi*sinpsi);
/**/
coeffn3Hn3plussin[3] = 1./128 * (-6*coslambda*cospsi*cospsi*sinlambda -3*sqrt3*coslambda*coslambda*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinlambda*sinlambda + 3*sqrt3*coslambda*coslambda*cospsi*cospsi + 3*sqrt3*sinlambda*sinlambda*sinpsi*sinpsi + 6*coslambda*sinlambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + sqrt3*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -8*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -sqrt3*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -sqrt3*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 2*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*coslambda*coslambda*cospsi*sinbeta*sinpsi + 16*sqrt3*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/* Projection coefficients for hcross in n3.H.n3 */
/**/
coeffn3Hn3crossconst = 1./64 * (4*cospsi*sinpsi -27*cospsi*sinlambda*sinlambda*sinpsi -4*cospsi*sinbeta*sinbeta*sinpsi + 4*cosbeta*cosbeta*cospsi*sinpsi + 27*coslambda*coslambda*cospsi*sinpsi -36*coslambda*cospsi*cospsi*sinbeta*sinlambda -18*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta -18*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 18*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi + 18*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -54*sqrt3*coslambda*cospsi*sinlambda*sinpsi -18*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi);
/**/
coeffn3Hn3crosscos[0] = 1./16*cosbeta * (-9*coslambda*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi -7*sqrt3*sinlambda*sinpsi*sinpsi + 7*sqrt3*cospsi*cospsi*sinlambda + 18*cospsi*sinbeta*sinlambda*sinpsi -14*sqrt3*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn3Hn3crosscos[1] = -3./32 * (-3*cospsi*sinpsi -6*cospsi*sinlambda*sinlambda*sinpsi -3*cosbeta*cosbeta*cospsi*sinpsi + 3*cospsi*sinbeta*sinbeta*sinpsi + 6*coslambda*coslambda*cospsi*sinpsi -8*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -2*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 2*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 8*coslambda*sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn3Hn3crosscos[2] = 1./16*cosbeta * (-3*coslambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi + sqrt3*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinlambda + 6*cospsi*sinbeta*sinlambda*sinpsi + 2*sqrt3*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn3Hn3crosscos[3] = 1./64 * (-3*cospsi*sinlambda*sinlambda*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi + 6*sqrt3*coslambda*cospsi*sinlambda*sinpsi -2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 2*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi);
/**/
coeffn3Hn3crosssin[0] = -1./16*cosbeta * (-9*sinlambda*sinpsi*sinpsi + 9*cospsi*cospsi*sinlambda + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi -18*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn3Hn3crosssin[1] = -3./32 * (-4*coslambda*coslambda*sinbeta*sinpsi*sinpsi -4*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 3*sqrt3*cospsi*sinpsi + 4*coslambda*coslambda*cospsi*cospsi*sinbeta + 4*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cospsi*sinbeta*sinbeta*sinpsi + 3*sqrt3*cosbeta*cosbeta*cospsi*sinpsi + 12*coslambda*cospsi*sinlambda*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 4*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi);
/**/
coeffn3Hn3crosssin[2] = 1./16*cosbeta * (-3*sinlambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi -6*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn3Hn3crosssin[3] = 1./64 * (-2*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 2*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*coslambda*coslambda*cospsi*sinpsi + 3*sqrt3*cospsi*sinlambda*sinlambda*sinpsi + 6*coslambda*cospsi*sinlambda*sinpsi + sqrt3*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi -4*sqrt3*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -2*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -sqrt3*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -sqrt3*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 2*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 4*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinlambda);
/* Projection coefficients for hplus in n2.H.n2 */
/**/
coeffn2Hn2plusconst = 1./128 * (-4*cospsi*cospsi + 4*sinpsi*sinpsi -27*coslambda*coslambda*cospsi*cospsi -27*sinlambda*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*cospsi*cospsi -4*sinbeta*sinbeta*sinpsi*sinpsi + 4*cosbeta*cosbeta*sinpsi*sinpsi + 4*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*sinpsi*sinpsi + 27*cospsi*cospsi*sinlambda*sinlambda -9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -54*sqrt3*coslambda*cospsi*cospsi*sinlambda + 54*sqrt3*coslambda*sinlambda*sinpsi*sinpsi -144*coslambda*cospsi*sinbeta*sinlambda*sinpsi -72*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -18*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -18*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 18*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 72*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn2Hn2pluscos[0] = 1./16*cosbeta * (-18*coslambda*cospsi*sinpsi -9*sinbeta*sinlambda*sinpsi*sinpsi + 9*cospsi*cospsi*sinbeta*sinlambda -7*sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 7*sqrt3*coslambda*cospsi*cospsi*sinbeta + 14*sqrt3*cospsi*sinlambda*sinpsi);
/**/
coeffn2Hn2pluscos[1] = -3./64 * (-3*sinpsi*sinpsi + 3*cospsi*cospsi -6*coslambda*coslambda*cospsi*cospsi -6*sinlambda*sinlambda*sinpsi*sinpsi -3*cosbeta*cosbeta*sinpsi*sinpsi -3*cospsi*cospsi*sinbeta*sinbeta + 3*cosbeta*cosbeta*cospsi*cospsi + 3*sinbeta*sinbeta*sinpsi*sinpsi + 6*coslambda*coslambda*sinpsi*sinpsi + 6*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -2*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -2*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -2*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 2*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -32*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn2Hn2pluscos[2] = -1./16*cosbeta * (-3*cospsi*cospsi*sinbeta*sinlambda + 3*sinbeta*sinlambda*sinpsi*sinpsi + 6*coslambda*cospsi*sinpsi + sqrt3*coslambda*cospsi*cospsi*sinbeta -sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*sinlambda*sinpsi);
/**/
coeffn2Hn2pluscos[3] = 1./128 * (-3*coslambda*coslambda*cospsi*cospsi -3*sinlambda*sinlambda*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*sinlambda*sinpsi*sinpsi + 6*sqrt3*coslambda*cospsi*cospsi*sinlambda -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi -8*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi -2*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 2*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 2*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 8*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi);
/**/
coeffn2Hn2plussin[0] = 1./16*cosbeta * (-9*coslambda*sinbeta*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi*sinbeta + 18*cospsi*sinlambda*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*cospsi*sinpsi -sqrt3*sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn2Hn2plussin[1] = -3./64 * (-3*sqrt3*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi -12*coslambda*cospsi*cospsi*sinlambda -3*sqrt3*cosbeta*cosbeta*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinbeta*sinbeta + 3*sqrt3*cosbeta*cosbeta*cospsi*cospsi + 3*sqrt3*sinbeta*sinbeta*sinpsi*sinpsi + 12*coslambda*sinlambda*sinpsi*sinpsi -16*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -4*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 4*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 4*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 16*coslambda*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn2Hn2plussin[2] = -1./16*cosbeta * (-3*coslambda*sinbeta*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi*sinbeta + 6*cospsi*sinlambda*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*cospsi*sinpsi -sqrt3*sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn2Hn2plussin[3] = 1./128 * (-6*coslambda*cospsi*cospsi*sinlambda -3*sqrt3*coslambda*coslambda*cospsi*cospsi -3*sqrt3*sinlambda*sinlambda*sinpsi*sinpsi + 3*sqrt3*coslambda*coslambda*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi*sinlambda*sinlambda + 6*coslambda*sinlambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + sqrt3*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -8*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sqrt3*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*coslambda*coslambda*cospsi*sinbeta*sinpsi -16*sqrt3*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/* Projection coefficients for hcross in n2.H.n2 */
/**/
coeffn2Hn2crossconst = 1./64 * (4*cospsi*sinpsi -27*cospsi*sinlambda*sinlambda*sinpsi -4*cospsi*sinbeta*sinbeta*sinpsi + 4*cosbeta*cosbeta*cospsi*sinpsi + 27*coslambda*coslambda*cospsi*sinpsi -36*coslambda*cospsi*cospsi*sinbeta*sinlambda -18*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi -18*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda -9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 18*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta + 18*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi + 54*sqrt3*coslambda*cospsi*sinlambda*sinpsi -18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 18*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi);
/**/
coeffn2Hn2crosscos[0] = -1./16*cosbeta * (-9*coslambda*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi -7*sqrt3*cospsi*cospsi*sinlambda + 7*sqrt3*sinlambda*sinpsi*sinpsi + 18*cospsi*sinbeta*sinlambda*sinpsi + 14*sqrt3*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn2Hn2crosscos[1] = -3./32 * (-3*cospsi*sinpsi -6*cospsi*sinlambda*sinlambda*sinpsi -3*cosbeta*cosbeta*cospsi*sinpsi + 3*cospsi*sinbeta*sinbeta*sinpsi + 6*coslambda*coslambda*cospsi*sinpsi -8*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -2*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 2*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 8*coslambda*sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn2Hn2crosscos[2] = -1./16*cosbeta * (-3*coslambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi + sqrt3*cospsi*cospsi*sinlambda -sqrt3*sinlambda*sinpsi*sinpsi + 6*cospsi*sinbeta*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn2Hn2crosscos[3] = 1./64 * (-3*cospsi*sinlambda*sinlambda*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta -2*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*cospsi*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi);
/**/
coeffn2Hn2crosssin[0] = -1./16*cosbeta * (-9*cospsi*cospsi*sinlambda + 9*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi + 18*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn2Hn2crosssin[1] = -3./32 * (-4*coslambda*coslambda*sinbeta*sinpsi*sinpsi -4*cospsi*cospsi*sinbeta*sinlambda*sinlambda -3*sqrt3*cospsi*sinpsi + 4*coslambda*coslambda*cospsi*cospsi*sinbeta + 4*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cosbeta*cosbeta*cospsi*sinpsi + 3*sqrt3*cospsi*sinbeta*sinbeta*sinpsi + 12*coslambda*cospsi*sinlambda*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 4*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi);
/**/
coeffn2Hn2crosssin[2] = 1./16*cosbeta * (-3*cospsi*cospsi*sinlambda + 3*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi + 6*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn2Hn2crosssin[3] = 1./64 * (-2*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 2*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cospsi*sinlambda*sinlambda*sinpsi + 3*sqrt3*coslambda*coslambda*cospsi*sinpsi + 6*coslambda*cospsi*sinlambda*sinpsi + sqrt3*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + sqrt3*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -sqrt3*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 4*sqrt3*coslambda*sinbeta*sinlambda*sinpsi*sinpsi);
/* Projection coefficients for hplus in n1.H.n1 */
/**/
coeffn1Hn1plusconst = 1./64 * (-2*cospsi*cospsi + 2*sinpsi*sinpsi -27*coslambda*coslambda*sinpsi*sinpsi -27*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*cospsi*cospsi -2*sinbeta*sinbeta*sinpsi*sinpsi + 2*cosbeta*cosbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*cospsi*cospsi + 27*sinlambda*sinlambda*sinpsi*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + 9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + 9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 144*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn1Hn1pluscos[0] = -1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi*sinbeta -coslambda*sinbeta*sinpsi*sinpsi + 2*cospsi*sinlambda*sinpsi);
/**/
coeffn1Hn1pluscos[1] = -3./32 * (-3*cospsi*cospsi + 3*sinpsi*sinpsi -3*cosbeta*cosbeta*cospsi*cospsi -3*coslambda*coslambda*cospsi*cospsi -3*sinbeta*sinbeta*sinpsi*sinpsi -3*sinlambda*sinlambda*sinpsi*sinpsi + 3*cosbeta*cosbeta*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinbeta*sinbeta + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn1Hn1pluscos[2] = 1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi*sinbeta -coslambda*sinbeta*sinpsi*sinpsi + 2*cospsi*sinlambda*sinpsi);
/**/
coeffn1Hn1pluscos[3] = 1./64 * (-3*coslambda*coslambda*sinpsi*sinpsi -3*cospsi*cospsi*sinlambda*sinlambda + 3*coslambda*coslambda*cospsi*cospsi + 3*sinlambda*sinlambda*sinpsi*sinpsi + cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 16*coslambda*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn1Hn1plussin[0] = 5./8*sqrt3*cosbeta * (cospsi*cospsi*sinbeta*sinlambda -2*coslambda*cospsi*sinpsi -sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn1Hn1plussin[1] = -3./16 * (-3*coslambda*cospsi*cospsi*sinlambda + 3*coslambda*sinlambda*sinpsi*sinpsi + coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda -4*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 4*coslambda*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn1Hn1plussin[2] = 1./8*sqrt3*cosbeta * (cospsi*cospsi*sinbeta*sinlambda -2*coslambda*cospsi*sinpsi -sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn1Hn1plussin[3] = 1./32 * (-3*coslambda*sinlambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi*sinlambda + coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi -4*coslambda*coslambda*cospsi*sinbeta*sinpsi -coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 4*cospsi*sinbeta*sinlambda*sinlambda*sinpsi);
/* Projection coefficients for hcross in n1.H.n1 */
/**/
coeffn1Hn1crossconst = 1./32 * (2*cospsi*sinpsi -27*coslambda*coslambda*cospsi*sinpsi -2*cospsi*sinbeta*sinbeta*sinpsi + 2*cosbeta*cosbeta*cospsi*sinpsi + 27*cospsi*sinlambda*sinlambda*sinpsi -36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 36*coslambda*cospsi*cospsi*sinbeta*sinlambda);
/**/
coeffn1Hn1crosscos[0] = -1./8*sqrt3*cosbeta * (cospsi*cospsi*sinlambda -sinlambda*sinpsi*sinpsi -2*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn1Hn1crosscos[1] = -3./16 * (3*cospsi*sinpsi -3*cospsi*sinbeta*sinbeta*sinpsi -3*cospsi*sinlambda*sinlambda*sinpsi + 3*cosbeta*cosbeta*cospsi*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi);
/**/
coeffn1Hn1crosscos[2] = 1./8*sqrt3*cosbeta * (cospsi*cospsi*sinlambda -sinlambda*sinpsi*sinpsi -2*coslambda*cospsi*sinbeta*sinpsi);
/**/
coeffn1Hn1crosscos[3] = 1./32 * (-3*coslambda*coslambda*cospsi*sinpsi + 3*cospsi*sinlambda*sinlambda*sinpsi + cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi -4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 4*coslambda*cospsi*cospsi*sinbeta*sinlambda);
/**/
coeffn1Hn1crosssin[0] = -5./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi -coslambda*sinpsi*sinpsi + 2*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn1Hn1crosssin[1] = -3./8 * (coslambda*coslambda*cospsi*cospsi*sinbeta + sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -coslambda*coslambda*sinbeta*sinpsi*sinpsi -cospsi*cospsi*sinbeta*sinlambda*sinlambda + 3*coslambda*cospsi*sinlambda*sinpsi + coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi);
/**/
coeffn1Hn1crosssin[2] = -1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi -coslambda*sinpsi*sinpsi + 2*cospsi*sinbeta*sinlambda*sinpsi);
/**/
coeffn1Hn1crosssin[3] = 1./16 * (coslambda*coslambda*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta -sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*coslambda*cospsi*sinlambda*sinpsi + cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi);
/* Coefficients in k.n3 */
/**/
coeffkn3const = 3./8*cosbeta * (sinlambda -sqrt3*coslambda);
/**/
coeffkn3cos[0] = 3./4 * (-sinbeta);
/**/
coeffkn3cos[1] = -1./8*cosbeta * (-sinlambda -sqrt3*coslambda);
/**/
coeffkn3sin[0] = -1./4*sqrt3 * (-sinbeta);
/**/
coeffkn3sin[1] = 1./8*cosbeta * (-coslambda + sqrt3*sinlambda);
/* Coefficients in k.n2 */
/**/
coeffkn2const = -3./8*cosbeta * (-sinlambda -sqrt3*coslambda);
/**/
coeffkn2cos[0] = -3./4 * (-sinbeta);
/**/
coeffkn2cos[1] = 1./8*cosbeta * (sinlambda -sqrt3*coslambda);
/**/
coeffkn2sin[0] = -1./4*sqrt3 * (-sinbeta);
/**/
coeffkn2sin[1] = 1./8*cosbeta * (-coslambda -sqrt3*sinlambda);
/* Coefficients in k.n1 */
/**/
coeffkn1const = 3./4*cosbeta * (-sinlambda);
/**/
coeffkn1cos[0] = 0. ;
/**/
coeffkn1cos[1] = 1./4*cosbeta * (-sinlambda);
/**/
coeffkn1sin[0] = 1./2*sqrt3 * (-sinbeta);
/**/
coeffkn1sin[1] = -1./4*cosbeta * (-coslambda);
/* Coefficients in k.(p1+p2) */
/**/
coeffkp1plusp2const = -1./8*cosbeta * (-3*sinlambda -sqrt3*coslambda);
/**/
coeffkp1plusp2cos[0] = -1./4 * (-sinbeta);
/**/
coeffkp1plusp2cos[1] = 1./24*cosbeta * (3*sinlambda -sqrt3*coslambda);
/**/
coeffkp1plusp2sin[0] = -1./4*sqrt3 * (-sinbeta);
/**/
coeffkp1plusp2sin[1] = 1./24*cosbeta * (-3*coslambda -sqrt3*sinlambda);
/* Coefficients in k.(p2+p3) */
/**/
coeffkp2plusp3const = 1./4*sqrt3*cosbeta * (-coslambda);
/**/
coeffkp2plusp3cos[0] = 1./2 * (-sinbeta);
/**/
coeffkp2plusp3cos[1] = -1./4/sqrt3 * (-cosbeta*coslambda);
/**/
coeffkp2plusp3sin[0] = 0. ;
/**/
coeffkp2plusp3sin[1] = -1./4/sqrt3 * (-cosbeta*sinlambda);
/* Coefficients in k.(p3+p1) */
/**/
coeffkp3plusp1const = -1./8*cosbeta * (3*sinlambda -sqrt3*coslambda);
/**/
coeffkp3plusp1cos[0] = -1./4 * (-sinbeta);
/**/
coeffkp3plusp1cos[1] = 1./24*cosbeta * (-3*sinlambda -sqrt3*coslambda);
/**/
coeffkp3plusp1sin[0] = 1./4*sqrt3 * (-sinbeta);
/**/
coeffkp3plusp1sin[1] = -1./24*cosbeta * (-3*coslambda + sqrt3*sinlambda);
/* Coefficients in k.p1 */
/**/
coeffkp1const = -1./4*sqrt3 * (-cosbeta*coslambda);
/**/
coeffkp1cos[0] = -1./2 * (-sinbeta);
/**/
coeffkp1cos[1] = 1./(4*sqrt3) * (-cosbeta*coslambda);
/**/
coeffkp1sin[0] = 0. ;
/**/
coeffkp1sin[1] = 1./(4*sqrt3) * (-cosbeta*sinlambda);
/* Coefficients in k.p2 */
/**/
coeffkp2const = 1./8*cosbeta * (3*sinlambda -sqrt3*coslambda);
/**/
coeffkp2cos[0] = 1./4 * (-sinbeta);
/**/
coeffkp2cos[1] = -1./24*cosbeta * (-3*sinlambda -sqrt3*coslambda);
/**/
coeffkp2sin[0] = -1./4*sqrt3 * (-sinbeta);
/**/
coeffkp2sin[1] = 1./24*cosbeta * (-3*coslambda + sqrt3*sinlambda);
/* Coefficients in k.p3 */
/**/
coeffkp3const = 1./8*cosbeta * (-3*sinlambda -sqrt3*coslambda);
/**/
coeffkp3cos[0] = 1./4 * (-sinbeta);
/**/
coeffkp3cos[1] = -1./24*cosbeta * (3*sinlambda -sqrt3*coslambda);
/**/
coeffkp3sin[0] = 1./4*sqrt3 * (-sinbeta);
/**/
coeffkp3sin[1] = -1./24*cosbeta * (-3*coslambda -sqrt3*sinlambda);
/* Coefficients in k.R */
/* k.R has only the first harmonic: constant and higher-order terms vanish */
/**/
coeffkRconst = 0.;
coeffkRcos[0] = 1. * (-cosbeta*coslambda);
coeffkRsin[0] = 1. * (-cosbeta*sinlambda);
coeffkRcos[1] = 0.;
coeffkRsin[1] = 0.;
}
/*********************** Fourier-domain response ************************/
/* Individual functions GABmode: older version, does not include the orbital delay (was treated separately as Bessel phase) */
/* Collective function EvaluateGABmode: orbital delay included */
/* Conventions changed: now MLDC conventions */
/* Function evaluating G21, combining the two polarization with the spherical harmonics factors */
double complex G21mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
/* Orbital phase of the constellation at time t */
const double phase = variant->ConstOmega*t + variant->ConstPhi0;
/* Tabulate the harmonics cos(k*phase), sin(k*phase), k = 1..4 (global scratch arrays) */
for(int k=0; k<4; k++) {
cosarray[k] = cos((k+1) * phase);
sinarray[k] = sin((k+1) * phase);
}
/* Harmonic expansion of the projections n3.H.n3 for both polarizations */
double n3Pn3plus = coeffn3Hn3plusconst;
double n3Pn3cross = coeffn3Hn3crossconst;
for(int k=0; k<4; k++) {
n3Pn3plus += coeffn3Hn3pluscos[k] * cosarray[k] + coeffn3Hn3plussin[k] * sinarray[k];
n3Pn3cross += coeffn3Hn3crosscos[k] * cosarray[k] + coeffn3Hn3crosssin[k] * sinarray[k];
}
/* k.n3 and k.(p1+p2): constant term plus the first two harmonics only */
double kn3 = coeffkn3const;
double kp1plusp2 = coeffkp1plusp2const;
for(int k=0; k<2; k++) {
kn3 += coeffkn3cos[k] * cosarray[k] + coeffkn3sin[k] * sinarray[k];
kp1plusp2 += coeffkp1plusp2cos[k] * cosarray[k] + coeffkp1plusp2sin[k] * sinarray[k];
}
/* Combine polarizations with the spherical-harmonic factors and apply the
 * transfer function: sinc delay along the arm and phase delay to the midpoint */
return I*PI*f*variant->ConstL/C_SI * (n3Pn3plus*Yfactorplus + n3Pn3cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.+kn3)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp1plusp2) );
}
/* Function evaluating G12, combining the two polarization with the spherical harmonics factors */
/* Single-link Fourier-domain response G12 (link 1->2), with the plus and cross
 * polarizations combined through the spin-weighted spherical-harmonic factors.
 * Older version: the orbital (k.R) delay is NOT included here (see EvaluateGABmode). */
double complex G12mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
  /* Orbital phase of the constellation at time t */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  /* Refresh the shared tables cos(k*alpha), sin(k*alpha) for harmonics k=1..4 */
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Fourier series (4 harmonics) for the projections n3.H.n3, plus and cross */
  double projplus = coeffn3Hn3plusconst;
  double projcross = coeffn3Hn3crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn3Hn3pluscos[k] + sinarray[k] * coeffn3Hn3plussin[k];
    projcross += cosarray[k] * coeffn3Hn3crosscos[k] + sinarray[k] * coeffn3Hn3crosssin[k];
  }
  /* Fourier series (2 harmonics) for the scalar products k.n3 and k.(p1+p2) */
  double kdotn3 = coeffkn3const;
  double kdotp1p2 = coeffkp1plusp2const;
  for(int k=0; k<2; k++) {
    kdotn3 += cosarray[k] * coeffkn3cos[k] + sinarray[k] * coeffkn3sin[k];
    kdotp1p2 += cosarray[k] * coeffkp1plusp2cos[k] + sinarray[k] * coeffkp1plusp2sin[k];
  }
  /* x = pi*f*L/c; the -k.n3 sign in the sinc selects this link orientation */
  const double x = PI*f*variant->ConstL/C_SI;
  const double complex polfactor = projplus*Yfactorplus + projcross*Yfactorcross;
  return I*x * polfactor * sinc( x * (1.-kdotn3)) * cexp( I*x * (1.+kdotp1p2) );
}
/* Function evaluating G32, combining the two polarization with the spherical harmonics factors */
/* Single-link Fourier-domain response G32 (link 3->2), with the plus and cross
 * polarizations combined through the spin-weighted spherical-harmonic factors.
 * Older version: the orbital (k.R) delay is NOT included here (see EvaluateGABmode). */
double complex G32mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
  /* Orbital phase of the constellation at time t */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  /* Refresh the shared tables cos(k*alpha), sin(k*alpha) for harmonics k=1..4 */
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Fourier series (4 harmonics) for the projections n1.H.n1, plus and cross */
  double projplus = coeffn1Hn1plusconst;
  double projcross = coeffn1Hn1crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn1Hn1pluscos[k] + sinarray[k] * coeffn1Hn1plussin[k];
    projcross += cosarray[k] * coeffn1Hn1crosscos[k] + sinarray[k] * coeffn1Hn1crosssin[k];
  }
  /* Fourier series (2 harmonics) for the scalar products k.n1 and k.(p2+p3) */
  double kdotn1 = coeffkn1const;
  double kdotp2p3 = coeffkp2plusp3const;
  for(int k=0; k<2; k++) {
    kdotn1 += cosarray[k] * coeffkn1cos[k] + sinarray[k] * coeffkn1sin[k];
    kdotp2p3 += cosarray[k] * coeffkp2plusp3cos[k] + sinarray[k] * coeffkp2plusp3sin[k];
  }
  /* x = pi*f*L/c; the +k.n1 sign in the sinc selects this link orientation */
  const double x = PI*f*variant->ConstL/C_SI;
  const double complex polfactor = projplus*Yfactorplus + projcross*Yfactorcross;
  return I*x * polfactor * sinc( x * (1.+kdotn1)) * cexp( I*x * (1.+kdotp2p3) );
}
/* Function evaluating G23, combining the two polarization with the spherical harmonics factors */
/* Single-link Fourier-domain response G23 (link 2->3), with the plus and cross
 * polarizations combined through the spin-weighted spherical-harmonic factors.
 * Older version: the orbital (k.R) delay is NOT included here (see EvaluateGABmode). */
double complex G23mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
  /* Orbital phase of the constellation at time t */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  /* Refresh the shared tables cos(k*alpha), sin(k*alpha) for harmonics k=1..4 */
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Fourier series (4 harmonics) for the projections n1.H.n1, plus and cross */
  double projplus = coeffn1Hn1plusconst;
  double projcross = coeffn1Hn1crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn1Hn1pluscos[k] + sinarray[k] * coeffn1Hn1plussin[k];
    projcross += cosarray[k] * coeffn1Hn1crosscos[k] + sinarray[k] * coeffn1Hn1crosssin[k];
  }
  /* Fourier series (2 harmonics) for the scalar products k.n1 and k.(p2+p3) */
  double kdotn1 = coeffkn1const;
  double kdotp2p3 = coeffkp2plusp3const;
  for(int k=0; k<2; k++) {
    kdotn1 += cosarray[k] * coeffkn1cos[k] + sinarray[k] * coeffkn1sin[k];
    kdotp2p3 += cosarray[k] * coeffkp2plusp3cos[k] + sinarray[k] * coeffkp2plusp3sin[k];
  }
  /* x = pi*f*L/c; the -k.n1 sign in the sinc selects this link orientation */
  const double x = PI*f*variant->ConstL/C_SI;
  const double complex polfactor = projplus*Yfactorplus + projcross*Yfactorcross;
  return I*x * polfactor * sinc( x * (1.-kdotn1)) * cexp( I*x * (1.+kdotp2p3) );
}
/* Function evaluating G13, combining the two polarization with the spherical harmonics factors */
/* Single-link Fourier-domain response G13 (link 1->3), with the plus and cross
 * polarizations combined through the spin-weighted spherical-harmonic factors.
 * Older version: the orbital (k.R) delay is NOT included here (see EvaluateGABmode). */
double complex G13mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
  /* Orbital phase of the constellation at time t */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  /* Refresh the shared tables cos(k*alpha), sin(k*alpha) for harmonics k=1..4 */
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Fourier series (4 harmonics) for the projections n2.H.n2, plus and cross */
  double projplus = coeffn2Hn2plusconst;
  double projcross = coeffn2Hn2crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn2Hn2pluscos[k] + sinarray[k] * coeffn2Hn2plussin[k];
    projcross += cosarray[k] * coeffn2Hn2crosscos[k] + sinarray[k] * coeffn2Hn2crosssin[k];
  }
  /* Fourier series (2 harmonics) for the scalar products k.n2 and k.(p3+p1) */
  double kdotn2 = coeffkn2const;
  double kdotp3p1 = coeffkp3plusp1const;
  for(int k=0; k<2; k++) {
    kdotn2 += cosarray[k] * coeffkn2cos[k] + sinarray[k] * coeffkn2sin[k];
    kdotp3p1 += cosarray[k] * coeffkp3plusp1cos[k] + sinarray[k] * coeffkp3plusp1sin[k];
  }
  /* x = pi*f*L/c; the +k.n2 sign in the sinc selects this link orientation */
  const double x = PI*f*variant->ConstL/C_SI;
  const double complex polfactor = projplus*Yfactorplus + projcross*Yfactorcross;
  return I*x * polfactor * sinc( x * (1.+kdotn2)) * cexp( I*x * (1.+kdotp3p1) );
}
/* Function evaluating G31, combining the two polarization with the spherical harmonics factors */
/* Single-link Fourier-domain response G31 (link 3->1), with the plus and cross
 * polarizations combined through the spin-weighted spherical-harmonic factors.
 * Older version: the orbital (k.R) delay is NOT included here (see EvaluateGABmode). */
double complex G31mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross)
{
  /* Orbital phase of the constellation at time t */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  /* Refresh the shared tables cos(k*alpha), sin(k*alpha) for harmonics k=1..4 */
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Fourier series (4 harmonics) for the projections n2.H.n2, plus and cross */
  double projplus = coeffn2Hn2plusconst;
  double projcross = coeffn2Hn2crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn2Hn2pluscos[k] + sinarray[k] * coeffn2Hn2plussin[k];
    projcross += cosarray[k] * coeffn2Hn2crosscos[k] + sinarray[k] * coeffn2Hn2crosssin[k];
  }
  /* Fourier series (2 harmonics) for the scalar products k.n2 and k.(p3+p1) */
  double kdotn2 = coeffkn2const;
  double kdotp3p1 = coeffkp3plusp1const;
  for(int k=0; k<2; k++) {
    kdotn2 += cosarray[k] * coeffkn2cos[k] + sinarray[k] * coeffkn2sin[k];
    kdotp3p1 += cosarray[k] * coeffkp3plusp1cos[k] + sinarray[k] * coeffkp3plusp1sin[k];
  }
  /* x = pi*f*L/c; the -k.n2 sign in the sinc selects this link orientation */
  const double x = PI*f*variant->ConstL/C_SI;
  const double complex polfactor = projplus*Yfactorplus + projcross*Yfactorcross;
  return I*x * polfactor * sinc( x * (1.-kdotn2)) * cexp( I*x * (1.+kdotp3p1) );
}
/* Function evaluating all coefficients G12, G21, G23, G32, G31, G13, combining the two polarization with the spherical harmonics factors */
/* Note: includes orbital delay */
/* Evaluate all six single-link Fourier-domain responses GAB at once.
 * Unlike the individual GABmode functions above, this version includes the
 * orbital (k.R) delay phase (controlled by tagdelayR) and supports the low-f
 * approximation levels. Results are written through the six output pointers.
 * Returns SUCCESS. Reads the file-global coefficient tables set up beforehand. */
int EvaluateGABmode(
const LISAconstellation *variant, /* Description of LISA variant */
double complex* G12, /* Output for G12 */
double complex* G21, /* Output for G21 */
double complex* G23, /* Output for G23 */
double complex* G32, /* Output for G32 */
double complex* G31, /* Output for G31 */
double complex* G13, /* Output for G13 */
const double f, /* Frequency */
const double t, /* Time */
const double complex Yfactorplus, /* Spin-weighted spherical harmonic factor for plus */
const double complex Yfactorcross, /* Spin-weighted spherical harmonic factor for cross */
const int tagdelayR, /* Tag: when 1, include the phase term of the R-delay */
const ResponseApproxtag responseapprox) /* Tag to select possible low-f approximation level in FD response */
{
double phase = variant->ConstOmega*t + variant->ConstPhi0;
/* Precompute array of sine/cosine of harmonics 1..4 of the orbital phase */
for(int j=0; j<4; j++) {
cosarray[j] = cos((j+1) * phase);
sinarray[j] = sin((j+1) * phase);
}
/* Projections nA.H.nA for the plus and cross polarizations (4-harmonic Fourier series) */
double n1Pn1plus = coeffn1Hn1plusconst;
double n1Pn1cross = coeffn1Hn1crossconst;
double n2Pn2plus = coeffn2Hn2plusconst;
double n2Pn2cross = coeffn2Hn2crossconst;
double n3Pn3plus = coeffn3Hn3plusconst;
double n3Pn3cross = coeffn3Hn3crossconst;
for(int j=0; j<4; j++) {
n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j];
n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j];
n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j];
n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j];
n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j];
n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j];
}
/* Scalar products with k (2-harmonic Fourier series) */
double kn1 = coeffkn1const;
double kn2 = coeffkn2const;
double kn3 = coeffkn3const;
double kp1plusp2 = coeffkp1plusp2const;
double kp2plusp3 = coeffkp2plusp3const;
double kp3plusp1 = coeffkp3plusp1const;
double kR = coeffkRconst;
for(int j=0; j<2; j++) {
kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j];
kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j];
kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j];
kp1plusp2 += cosarray[j] * coeffkp1plusp2cos[j] + sinarray[j] * coeffkp1plusp2sin[j];
kp2plusp3 += cosarray[j] * coeffkp2plusp3cos[j] + sinarray[j] * coeffkp2plusp3sin[j];
kp3plusp1 += cosarray[j] * coeffkp3plusp1cos[j] + sinarray[j] * coeffkp3plusp1sin[j];
kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j];
}
/* Common factors: polarization combinations and the phase scales pi*f*L/c, 2*pi*f*R/c */
double complex factn1Pn1 = n1Pn1plus*Yfactorplus + n1Pn1cross*Yfactorcross;
double complex factn2Pn2 = n2Pn2plus*Yfactorplus + n2Pn2cross*Yfactorcross;
double complex factn3Pn3 = n3Pn3plus*Yfactorplus + n3Pn3cross*Yfactorcross;
double prefactor = PI*f*variant->ConstL/C_SI;
double prefactorR = 2*PI*f*variant->OrbitR/C_SI;
double complex factorcexp12 = cexp(I*prefactor * (1.+kp1plusp2));
double complex factorcexp23 = cexp(I*prefactor * (1.+kp2plusp3));
double complex factorcexp31 = cexp(I*prefactor * (1.+kp3plusp1));
/* The +/- k.nA sign distinguishes the two orientations of each link */
double factorsinc12 = sinc( prefactor * (1.-kn3));
double factorsinc21 = sinc( prefactor * (1.+kn3));
double factorsinc23 = sinc( prefactor * (1.-kn1));
double factorsinc32 = sinc( prefactor * (1.+kn1));
double factorsinc31 = sinc( prefactor * (1.-kn2));
double factorsinc13 = sinc( prefactor * (1.+kn2));
/* The tag tagdelayR allows to choose to include or not the R-delay phase term (here leading order) */
double complex factorcexpkR;
if(tagdelayR) factorcexpkR = cexp(I*prefactorR * kR);
else factorcexpkR = 1.;
/* Level of approximation for the low-f response - choices are full, lowfL or lowf.
 * NOTE: this deliberately runs AFTER the tagdelayR branch, so lowf overrides it. */
if(responseapprox==lowf) {
factorcexpkR = 1.;
}
if((responseapprox==lowfL)||(responseapprox==lowf)) {
factorsinc12 = 1.;
factorsinc21 = 1.;
factorsinc23 = 1.;
factorsinc32 = 1.;
factorsinc31 = 1.;
factorsinc13 = 1.;
factorcexp12 = 1.;
factorcexp23 = 1.;
factorcexp31 = 1.;
}
/* Output result: GAB = i*pi*f*L/c * e^{i k.R phase} * (nC.H.nC projection) * sinc * e^{i arm phase} */
*G12 = I*prefactor * factorcexpkR * factn3Pn3 * factorsinc12 * factorcexp12;
*G21 = I*prefactor * factorcexpkR * factn3Pn3 * factorsinc21 * factorcexp12;
*G23 = I*prefactor * factorcexpkR * factn1Pn1 * factorsinc23 * factorcexp23;
*G32 = I*prefactor * factorcexpkR * factn1Pn1 * factorsinc32 * factorcexp23;
*G31 = I*prefactor * factorcexpkR * factn2Pn2 * factorsinc31 * factorcexp31;
*G13 = I*prefactor * factorcexpkR * factn2Pn2 * factorsinc13 * factorcexp31;
return SUCCESS;
}
/*********************** Fourier-domain TDI factors ************************/
/* Functions evaluating the Fourier-domain factors (combinations of the GAB's) for TDI observables */
/* NOTE: factors have been scaled out, in parallel of what is done for the noise function */
/* Note: in case only one channel is considered, amplitudes for channels 2 and 3 are simply set to 0 */
/* (allows minimal changes from the old structure that assumed KTV A,E,T - but probably not optimal) */
/* Combine the six single-link responses GAB into the (rescaled) TDI observables
 * selected by tditag, writing up to three channels through the output pointers.
 * Common frequency-dependent factors have been scaled out (see ScaledTDIfactor3Chan);
 * when only one channel is requested, channels 2 and 3 are set to 0.
 * Returns SUCCESS; exits the process on an unrecognized tditag. */
int EvaluateTDIfactor3Chan(
const LISAconstellation *variant, /* Description of LISA variant */
double complex* factor1, /* Output for factor for TDI channel 1 */
double complex* factor2, /* Output for factor for TDI channel 2 */
double complex* factor3, /* Output for factor for TDI channel 3 */
const double complex G12, /* Input for G12 */
const double complex G21, /* Input for G21 */
const double complex G23, /* Input for G23 */
const double complex G32, /* Input for G32 */
const double complex G31, /* Input for G31 */
const double complex G13, /* Input for G13 */
const double f, /* Frequency */
const TDItag tditag, /* Selector for the TDI observables */
const ResponseApproxtag responseapprox) /* Tag to select possible low-f approximation level in FD response */
{
/* Notation: x=pifL, z=e^2ix */
double x = PI*f*variant->ConstL/C_SI;
double complex z = cexp(2*I*x);
/* In both lowf and lowf-L approximations, ignore z factors - consistently ignore all TDI delays */
if((responseapprox==lowf)||(responseapprox==lowfL)) {
x = 0.;
z = 1.;
}
switch(tditag) {
/* For testing purposes: basic yAB observable - no factor */
case y12:
*factor1 = G12;
*factor2 = 0.;
*factor3 = 0.;
break;
/* For testing purposes: basic yABL observable - no factor, same as for yAB */
case y12L:
*factor1 = G12;
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation rescaled TDI aet from X,Y,Z */
/* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*e2ix - T 2*sqrt2*sin2x*sinx*e3ix */
case TDIAETXYZ:
*factor1 = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 );
*factor2 = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G21-G23) );
*factor3 = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31);
break;
/* First-generation rescaled TDI aet from alpha, beta, gamma */
/* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */
case TDIAETalphabetagamma:
/* NOTE(review): the (G21+G13) below breaks the pairwise pattern of the other
 * terms (one would expect (G21+G23)); it matches the commented-out single-channel
 * version as well - verify against the TDI A(alpha,beta,gamma) derivation. */
*factor1 = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13));
*factor2 = 0.5*invsqrt3 * ((2.+z)*(G12-G32) + (1.+z)*(G21-G23) + (1.+2*z)*(G13-G31));
*factor3 = invsqrt3 * (G21-G12 + G32-G23 + G13-G31);
break;
/* First-generation TDI XYZ */
/* With x=pifL, factor scaled out: 2I*sin2x*e2ix */
case TDIXYZ:
*factor1 = G21 + z*G12 - G31 - z*G13;
*factor2 = G32 + z*G23 - G12 - z*G21;
*factor3 = G13 + z*G31 - G23 - z*G32;
break;
/* First-generation TDI alpha beta gamma */
case TDIalphabetagamma:
*factor1 = G21-G31 + z*(G13-G12) + z*z*(G32-G23);
*factor2 = G32-G12 + z*(G21-G23) + z*z*(G13-G31);
*factor3 = G13-G23 + z*(G32-G31) + z*z*(G21-G12);
break;
/* First-generation TDI X only */
case TDIX:
*factor1 = G21 + z*G12 - G31 - z*G13;
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation TDI alpha only */
case TDIalpha:
*factor1 = G21-G31 + z*(G13-G12) + z*z*(G32-G23);
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation rescaled TDI aet from X,Y,Z, single channel */
/* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix */
case TDIAXYZ:
*factor1 = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 );
*factor2 = 0.;
*factor3 = 0.;
break;
case TDIEXYZ:
/* Bug fix: last term previously read (G12-G23); the same E channel in the
 * three-channel case TDIAETXYZ uses (G21-G23) - made consistent with it. */
*factor1 = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G21-G23) );
*factor2 = 0.;
*factor3 = 0.;
break;
case TDITXYZ:
*factor1 = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31);
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation rescaled TDI aet from alpha, beta, gamma, single channel */
/* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */
case TDIAalphabetagamma:
/* NOTE(review): same (G21+G13) pattern question as TDIAETalphabetagamma above */
*factor1 = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13));
*factor2 = 0.;
*factor3 = 0.;
break;
case TDIEalphabetagamma:
*factor1 = 0.5*invsqrt3 * ((2.+z)*(G12-G32) + (1.+z)*(G21-G23) + (1.+2*z)*(G13-G31));
*factor2 = 0.;
*factor3 = 0.;
break;
case TDITalphabetagamma:
*factor1 = invsqrt3 * (G21-G12 + G32-G23 + G13-G31);
*factor2 = 0.;
*factor3 = 0.;
break;
default:
printf("Error in EvaluateTDIfactor3Chan: tditag not recognized.\n");
exit(1);
}
return SUCCESS;
}
/* Function evaluating the Fourier-domain factors that have been scaled out of TDI observables */
/* The factors scaled out, parallel what is done for the noise functions */
/* Note: in case only one channel is considered, factors for channels 2 and 3 are simply set to 0 */
/* Evaluate the frequency-dependent factors that were scaled out of the TDI
 * observables in EvaluateTDIfactor3Chan (in parallel with the noise functions).
 * When only one channel is considered, factors for channels 2 and 3 are set to 0.
 * Returns SUCCESS; exits the process on an unrecognized tditag. */
int ScaledTDIfactor3Chan(
const LISAconstellation *variant, /* Description of LISA variant */
double complex* factor1, /* Output for factor for TDI factor 1 */
double complex* factor2, /* Output for factor for TDI factor 2 */
double complex* factor3, /* Output for factor for TDI factor 3 */
const double f, /* Frequency */
const TDItag tditag) /* Selector for the TDI observables */
{
/* Notation: x=pifL */
double x = PI*f*variant->ConstL/C_SI;
switch(tditag) {
/* First-generation rescaled TDI aet from X,Y,Z */
case TDIAETXYZ:
*factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x);
*factor2 = I*sqrt(2)*sin(2*x)*cexp(2*I*x);
*factor3 = 2*sqrt(2)*sin(x)*sin(2*x)*cexp(3*I*x);
break;
/* First-generation rescaled TDI aet from alpha, beta, gamma */
case TDIAETalphabetagamma:
*factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x);
*factor2 = -I*2*sqrt(2)*sin(x)*cexp(I*x);
/* NOTE(review): sin(3x)/sin(x) is 0/0 (NaN) whenever sin(x)=0, i.e. at
 * f = k*c/(2L) and at f=0 - callers should avoid these frequencies; verify. */
*factor3 = sin(3*x)/sin(x)*cexp(I*x);
break;
/* First-generation TDI XYZ */
case TDIXYZ:
*factor1 = 2*I*sin(2*x)*cexp(2*I*x);
*factor2 = 2*I*sin(2*x)*cexp(2*I*x);
*factor3 = 2*I*sin(2*x)*cexp(2*I*x);
break;
/* First-generation TDI alpha beta gamma: nothing was scaled out */
case TDIalphabetagamma:
*factor1 = 1.;
*factor2 = 1.;
*factor3 = 1.;
break;
/* First-generation TDI X only */
case TDIX:
*factor1 = 2*I*sin(2*x)*cexp(2*I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation TDI alpha only */
case TDIalpha:
*factor1 = 1.;
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation rescaled TDI aet from X,Y,Z, single channel */
/* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix */
case TDIAXYZ:
*factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
case TDIEXYZ:
*factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
case TDITXYZ:
*factor1 = 2*sqrt(2)*sin(x)*sin(2*x)*cexp(3*I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
/* First-generation rescaled TDI aet from alpha, beta, gamma, single channel */
/* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */
case TDIAalphabetagamma:
*factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
case TDIEalphabetagamma:
*factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
case TDITalphabetagamma:
*factor1 = sin(3*x)/sin(x)*cexp(I*x);
*factor2 = 0.;
*factor3 = 0.;
break;
default:
/* Bug fix: the message previously named EvaluateTDIfactor3Chan */
printf("Error in ScaledTDIfactor3Chan: tditag not recognized.\n");
exit(1);
}
return SUCCESS;
}
/* Function restoring the factor that have been scaled out of the TDI observables */
/* NOTE: the operation is made in-place, and the input is overwritten */
/* Restore, in place, the frequency-dependent factor that was scaled out of a
 * TDI observable (see ScaledTDIfactor3Chan). The amp_real/amp_imag vectors of
 * every mode in the list are overwritten. Returns SUCCESS; exits on an invalid
 * channel number (previously `factor` was read uninitialized in that case - UB). */
int RestoreInPlaceScaledFactorTDI(
const LISAconstellation *variant, /* Description of LISA variant */
ListmodesCAmpPhaseFrequencySeries* listtdi, /* Output/Input: list of mode contributions to TDI observable */
TDItag tditag, /* Tag selecting the TDI observable */
int nchannel) /* TDI channel number, in {1,2,3} */
{
double complex factor1 = 0;
double complex factor2 = 0;
double complex factor3 = 0;
double complex factor;
double complex camp;
ListmodesCAmpPhaseFrequencySeries* listelement = listtdi;
/* Going through the list of modes */
while(listelement) {
gsl_vector* freq = listelement->freqseries->freq;
gsl_vector* ampreal = listelement->freqseries->amp_real;
gsl_vector* ampimag = listelement->freqseries->amp_imag;
/* size_t index: freq->size is unsigned, avoids signed/unsigned comparison */
for(size_t i=0; i<freq->size; i++) {
ScaledTDIfactor3Chan(variant, &factor1, &factor2, &factor3, gsl_vector_get(freq, i), tditag);
switch(nchannel) {
case 1: factor = factor1; break;
case 2: factor = factor2; break;
case 3: factor = factor3; break;
default:
/* Bug fix: without this, factor was used uninitialized for invalid nchannel */
printf("Error in RestoreInPlaceScaledFactorTDI: nchannel %d not in {1,2,3}.\n", nchannel);
exit(1);
}
/* Multiply the complex amplitude by the restored factor, in place */
camp = factor * (gsl_vector_get(ampreal, i) + I*gsl_vector_get(ampimag, i));
gsl_vector_set(ampreal, i, creal(camp));
gsl_vector_set(ampimag, i, cimag(camp));
}
listelement = listelement->next;
}
return SUCCESS;
}
/* Functions evaluating the Fourier-domain factors (combinations of the GAB's) for TDI observables */
/* int EvaluateTDIfactor1Chan( */
/* double complex* factor, /\* Output for factor for TDI channel *\/ */
/* const double complex G12, /\* Input for G12 *\/ */
/* const double complex G21, /\* Input for G21 *\/ */
/* const double complex G23, /\* Input for G23 *\/ */
/* const double complex G32, /\* Input for G32 *\/ */
/* const double complex G31, /\* Input for G31 *\/ */
/* const double complex G13, /\* Input for G13 *\/ */
/* const double f, /\* Frequency *\/ */
/* const TDItag tditag) /\* Selector for the TDI observables *\/ */
/* { */
/* /\* Notation: x=pifL, z = e^2ix*\/ */
/* double x = PI*f*variant->ConstL/C_SI; */
/* double complex z = cexp(2*I*x); */
/* double sin2x = sin(2*x); */
/* double complex commonfac; */
/* switch(tditag) { */
/* /\* First-generation TDI XYZ *\/ */
/* case TDIX: { */
/* commonfac = 2*I*z*sin2x; */
/* *factor = commonfac * (G21 + z*G12 - G31 - z*G13); } */
/* case TDIY: { */
/* commonfac = 2*I*z*sin2x; */
/* *factor = commonfac * (G32 + z*G23 - G12 - z*G21); } */
/* case TDIZ: { */
/* commonfac = 2*I*z*sin2x; */
/* *factor = commonfac * (G13 + z*G31 - G23 - z*G32); } */
/* /\* First-generation TDI alpha beta gamma *\/ */
/* case TDIalpha: { */
/* *factor = G21-G31 + z*(G13-G12) + z*z*(G32-G23); } */
/* case TDIbeta: { */
/* *factor = G32-G12 + z*(G21-G23) + z*z*(G13-G31); } */
/* case TDIgamma: { */
/* *factor = G13-G23 + z*(G32-G31) + z*z*(G21-G12); } */
/* /\* First-generation rescaled TDI aet from X,Y,Z *\/ */
/* /\* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix *\/ */
/* case TDIAXYZ: { */
/* *factor = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 ); } */
/* case TDIEXYZ: { */
/* *factor = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G12-G23) ); } */
/* case TDITXYZ: { */
/* *factor = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31); } */
/* /\* First-generation rescaled TDI aet from alpha, beta, gamma *\/ */
/* /\* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) *\/ */
/* case TDIAalphabetagamma: { */
/* *factor = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13)); } */
/* case TDIEalphabetagamma: { */
/* *factor = 0.5*invsqrt3 * ((2+z)*(G12-G32) + (1+z)*(G21-G23) + (1.+2*z)*(G13-G31)); } */
/* case TDITalphabetagamma: { */
/* *factor = invsqrt3 * (G21-G12 + G32-G23 + G13-G31); } */
/* default: { */
/* printf("Error in EvaluateTDIfactor3Chan: tditag not recognized."); */
/* exit(1); } */
/* } */
/* } */
/*********************** Time-domain response ************************/
/* Processing single mode in amp/phase form through orbital time delay */
/* Process a single mode in amp/phase form through the orbital (k.R) time delay:
 * evaluates the amplitude and phase splines at the retarded time t+delay and
 * writes them through the output pointers.
 * Bug fix: the function is declared to return double but previously fell off
 * the end without a return statement (UB if the caller used the value). It now
 * returns the applied orbital delay in seconds. */
static double hOTDAmpPhase(
const LISAconstellation *variant, /* Description of LISA variant */
double* amp, /* Output: amplitude */
double* phase, /* Output: phase */
gsl_spline* splineamp, /* Input spline for TD mode amplitude */
gsl_spline* splinephase, /* Input spline for TD mode phase */
gsl_interp_accel* accelamp, /* Accelerator for amp spline */
gsl_interp_accel* accelphase, /* Accelerator for phase spline */
const double t) /* Time */
{
double tphase=variant->ConstOmega*t + variant->ConstPhi0;
/* Precompute array of sine/cosine of harmonics 1..4 of the orbital phase */
for(int j=0; j<4; j++) {
cosarray[j] = cos((j+1) * tphase);
sinarray[j] = sin((j+1) * tphase);
}
/* Scalar product k.R (2-harmonic Fourier series) */
double kR = coeffkRconst;
for(int j=0; j<2; j++) {
kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j];
}
/* Orbital delay */
double delay = -(kR*variant->OrbitR)/C_SI;
/* Output result: splines evaluated at the retarded time */
*amp = gsl_spline_eval(splineamp, t+delay, accelamp);
*phase = gsl_spline_eval(splinephase, t+delay, accelphase);
return delay;
}
/* Functions evaluating yAB observables in time domain - constellation response only */
/* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */
/* Time-domain y12 observable from an h22 mode in amp/phase form - constellation
 * response only (no orbital delay). Includes both the (2,2) and (2,-2)
 * contributions, assuming planar orbits so that h2-2 = conj(h22). */
static double y12LTDfromh22AmpPhase(
const LISAconstellation *variant, /* Description of LISA variant */
gsl_spline* splineamp, /* Input spline for h22 TD amp */
gsl_spline* splinephase, /* Input spline for h22 TD phase */
gsl_interp_accel* accelamp, /* Accelerator for amp spline */
gsl_interp_accel* accelphase, /* Accelerator for phase spline */
double complex Y22, /* Y22 factor needed to convert h22 to hplus, hcross */
double complex Y2m2, /* Y2-2 factor needed to convert h2-2 to hplus, hcross */
const double t) /* Time */
{
  /* Orbital phase and shared cos/sin tables for harmonics 1..4 */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Projections n3.H.n3 for plus and cross (4-harmonic Fourier series) */
  double projplus = coeffn3Hn3plusconst;
  double projcross = coeffn3Hn3crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn3Hn3pluscos[k] + sinarray[k] * coeffn3Hn3plussin[k];
    projcross += cosarray[k] * coeffn3Hn3crosscos[k] + sinarray[k] * coeffn3Hn3crosssin[k];
  }
  /* Scalar products k.n3, k.p1, k.p2 (2-harmonic Fourier series) */
  double kdotn3 = coeffkn3const;
  double kdotp1 = coeffkp1const;
  double kdotp2 = coeffkp2const;
  for(int k=0; k<2; k++) {
    kdotn3 += cosarray[k] * coeffkn3cos[k] + sinarray[k] * coeffkn3sin[k];
    kdotp1 += cosarray[k] * coeffkp1cos[k] + sinarray[k] * coeffkp1sin[k];
    kdotp2 += cosarray[k] * coeffkp2cos[k] + sinarray[k] * coeffkp2sin[k];
  }
  /* Geometric prefactors and the two constellation delays */
  double factorplus = (1./(1.-kdotn3)) * 0.5*projplus;
  double factorcross = (1./(1.-kdotn3)) * 0.5*projcross;
  double delayemit = -((kdotp1 + 1)*variant->ConstL)/C_SI;
  double delayrecv = -(kdotp2*variant->ConstL)/C_SI;
  /* h22 amp/phase at the two retarded times; rebuild hplus, hcross from
   * Y22*h22 + Y2-2*h2-2 with h2-2 = conj(h22) (planar orbits) */
  double ampemit = gsl_spline_eval(splineamp, t+delayemit, accelamp);
  double phiemit = gsl_spline_eval(splinephase, t+delayemit, accelphase);
  double amprecv = gsl_spline_eval(splineamp, t+delayrecv, accelamp);
  double phirecv = gsl_spline_eval(splinephase, t+delayrecv, accelphase);
  double complex hmodeemit = Y22 * ampemit * cexp(I*phiemit) + Y2m2 * ampemit * cexp(-I*phiemit);
  double complex hmoderecv = Y22 * amprecv * cexp(I*phirecv) + Y2m2 * amprecv * cexp(-I*phirecv);
  double hplusemit = creal(hmodeemit);
  double hcrossemit = -cimag(hmodeemit);
  double hplusrecv = creal(hmoderecv);
  double hcrossrecv = -cimag(hmoderecv);
  /* Fractional frequency shift along the link */
  return factorplus*(hplusemit - hplusrecv) + factorcross*(hcrossemit - hcrossrecv);
}
/* Functions evaluating yAB observables in time domain - orbital and constellation response */
/* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */
/* Time-domain y12 observable from an h22 mode in amp/phase form - orbital AND
 * constellation response (adds the k.R delay on top of y12LTDfromh22AmpPhase).
 * Includes both (2,2) and (2,-2) contributions, assuming planar orbits so that
 * h2-2 = conj(h22). Returns the fractional frequency shift y12 at time t. */
static double y12TDfromh22AmpPhase(
const LISAconstellation *variant, /* Description of LISA variant */
gsl_spline* splineamp, /* Input spline for h22 TD amp */
gsl_spline* splinephase, /* Input spline for h22 TD phase */
gsl_interp_accel* accelamp, /* Accelerator for amp spline */
gsl_interp_accel* accelphase, /* Accelerator for phase spline */
double complex Y22, /* Y22 factor needed to convert h22 to hplus, hcross */
double complex Y2m2, /* Y2-2 factor needed to convert h2-2 to hplus, hcross */
const double t) /* Time */
{
/* Precompute array of sine/cosine of harmonics 1..4 of the orbital phase */
double phase=variant->ConstOmega*t + variant->ConstPhi0;
for(int j=0; j<4; j++) {
cosarray[j] = cos((j+1) * phase);
sinarray[j] = sin((j+1) * phase);
}
/* Scalar product k.R (2-harmonic Fourier series) */
double kR = coeffkRconst;
for(int j=0; j<2; j++) {
kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j];
}
/* Orbital delay, common to both spline evaluations below */
double delay0 = -(kR*variant->OrbitR)/C_SI;
/* Projections n3.H.n3 for plus and cross (4-harmonic Fourier series) */
double n3Pn3plus = coeffn3Hn3plusconst;
double n3Pn3cross = coeffn3Hn3crossconst;
for(int j=0; j<4; j++) {
n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j];
n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j];
}
/* Scalar products with k (2-harmonic Fourier series) */
double kn3 = coeffkn3const;
double kp1 = coeffkp1const;
double kp2 = coeffkp2const;
for(int j=0; j<2; j++) {
kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j];
kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j];
kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j];
}
/* Geometric prefactors and the two delays (orbital + constellation) */
double factorp = (1./(1.-kn3)) * 0.5*n3Pn3plus;
double factorc = (1./(1.-kn3)) * 0.5*n3Pn3cross;
double firstdelay = delay0 - ((kp1 + 1)*variant->ConstL)/C_SI;
double seconddelay = delay0 - (kp2*variant->ConstL)/C_SI;
/* Values of Y22*h22 + Y2-2*h2-2 at 1 and 2 with delays, and hplus, hcross */
/* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */
double A22at1 = gsl_spline_eval(splineamp, t+firstdelay, accelamp);
double phi22at1 = gsl_spline_eval(splinephase, t+firstdelay, accelphase);
double A22at2 = gsl_spline_eval(splineamp, t+seconddelay, accelamp);
double phi22at2 = gsl_spline_eval(splinephase, t+seconddelay, accelphase);
double complex Y22h22at1 = Y22 * A22at1 * cexp(I*phi22at1);
double complex Y22h22at2 = Y22 * A22at2 * cexp(I*phi22at2);
double complex Y2m2h2m2at1 = Y2m2 * A22at1 * cexp(-I*phi22at1);
double complex Y2m2h2m2at2 = Y2m2 * A22at2 * cexp(-I*phi22at2);
double hp1 = creal(Y22h22at1 + Y2m2h2m2at1);
double hc1 = -cimag(Y22h22at1 + Y2m2h2m2at1);
double hp2 = creal(Y22h22at2 + Y2m2h2m2at2);
double hc2 = -cimag(Y22h22at2 + Y2m2h2m2at2);
/* Result: projected difference of the delayed polarizations */
double y12 = factorp*(hp1 - hp2) + factorc*(hc1 - hc2);
return y12;
}
/* Functions evaluating yAB observables in time domain */
/* Time-domain y12 observable from hplus/hcross splines, including both the
 * orbital (k.R) delay and the constellation delays. */
double y12TD(
const LISAconstellation *variant, /* Description of LISA variant */
gsl_spline* splinehp, /* Input spline for TD hplus */
gsl_spline* splinehc, /* Input spline for TD hcross */
gsl_interp_accel* accelhp, /* Accelerator for hp spline */
gsl_interp_accel* accelhc, /* Accelerator for hc spline */
const double t) /* Time */
{
  /* Orbital phase and shared cos/sin tables for harmonics 1..4 */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Projections n3.H.n3 for plus and cross (4-harmonic Fourier series) */
  double projplus = coeffn3Hn3plusconst;
  double projcross = coeffn3Hn3crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn3Hn3pluscos[k] + sinarray[k] * coeffn3Hn3plussin[k];
    projcross += cosarray[k] * coeffn3Hn3crosscos[k] + sinarray[k] * coeffn3Hn3crosssin[k];
  }
  /* Scalar products k.n3, k.p1, k.p2, k.R (2-harmonic Fourier series) */
  double kdotn3 = coeffkn3const;
  double kdotp1 = coeffkp1const;
  double kdotp2 = coeffkp2const;
  double kdotR = coeffkRconst;
  for(int k=0; k<2; k++) {
    kdotn3 += cosarray[k] * coeffkn3cos[k] + sinarray[k] * coeffkn3sin[k];
    kdotp1 += cosarray[k] * coeffkp1cos[k] + sinarray[k] * coeffkp1sin[k];
    kdotp2 += cosarray[k] * coeffkp2cos[k] + sinarray[k] * coeffkp2sin[k];
    kdotR += cosarray[k] * coeffkRcos[k] + sinarray[k] * coeffkRsin[k];
  }
  /* Geometric prefactors and the two retarded times (orbital + constellation delays) */
  double factorplus = (1./(1.-kdotn3)) * 0.5*projplus;
  double factorcross = (1./(1.-kdotn3)) * 0.5*projcross;
  double temit = t - (kdotR*variant->OrbitR + (kdotp1 + 1)*variant->ConstL)/C_SI;
  double trecv = t - (kdotR*variant->OrbitR + kdotp2*variant->ConstL)/C_SI;
  /* Projected difference of the delayed polarizations */
  double dhplus = gsl_spline_eval(splinehp, temit, accelhp) - gsl_spline_eval(splinehp, trecv, accelhp);
  double dhcross = gsl_spline_eval(splinehc, temit, accelhc) - gsl_spline_eval(splinehc, trecv, accelhc);
  return factorplus*dhplus + factorcross*dhcross;
}
/* Time-domain y21 observable from hplus/hcross splines, including both the
 * orbital (k.R) delay and the constellation delays (opposite link orientation
 * to y12TD: note the +k.n3 sign and swapped roles of p1 and p2). */
double y21TD(
const LISAconstellation *variant, /* Description of LISA variant */
gsl_spline* splinehp, /* Input spline for TD hplus */
gsl_spline* splinehc, /* Input spline for TD hcross */
gsl_interp_accel* accelhp, /* Accelerator for hp spline */
gsl_interp_accel* accelhc, /* Accelerator for hc spline */
const double t) /* Time */
{
  /* Orbital phase and shared cos/sin tables for harmonics 1..4 */
  const double alpha = variant->ConstOmega*t + variant->ConstPhi0;
  for(int k=1; k<=4; k++) {
    cosarray[k-1] = cos(k * alpha);
    sinarray[k-1] = sin(k * alpha);
  }
  /* Projections n3.H.n3 for plus and cross (4-harmonic Fourier series) */
  double projplus = coeffn3Hn3plusconst;
  double projcross = coeffn3Hn3crossconst;
  for(int k=0; k<4; k++) {
    projplus += cosarray[k] * coeffn3Hn3pluscos[k] + sinarray[k] * coeffn3Hn3plussin[k];
    projcross += cosarray[k] * coeffn3Hn3crosscos[k] + sinarray[k] * coeffn3Hn3crosssin[k];
  }
  /* Scalar products k.n3, k.p1, k.p2, k.R (2-harmonic Fourier series) */
  double kdotn3 = coeffkn3const;
  double kdotp1 = coeffkp1const;
  double kdotp2 = coeffkp2const;
  double kdotR = coeffkRconst;
  for(int k=0; k<2; k++) {
    kdotn3 += cosarray[k] * coeffkn3cos[k] + sinarray[k] * coeffkn3sin[k];
    kdotp1 += cosarray[k] * coeffkp1cos[k] + sinarray[k] * coeffkp1sin[k];
    kdotp2 += cosarray[k] * coeffkp2cos[k] + sinarray[k] * coeffkp2sin[k];
    kdotR += cosarray[k] * coeffkRcos[k] + sinarray[k] * coeffkRsin[k];
  }
  /* Geometric prefactors and the two retarded times (orbital + constellation delays) */
  double factorplus = (1./(1.+kdotn3)) * 0.5*projplus;
  double factorcross = (1./(1.+kdotn3)) * 0.5*projcross;
  double temit = t - (kdotR*variant->OrbitR + (kdotp2 + 1)*variant->ConstL)/C_SI;
  double trecv = t - (kdotR*variant->OrbitR + kdotp1*variant->ConstL)/C_SI;
  /* Projected difference of the delayed polarizations */
  double dhplus = gsl_spline_eval(splinehp, temit, accelhp) - gsl_spline_eval(splinehp, trecv, accelhp);
  double dhcross = gsl_spline_eval(splinehc, temit, accelhc) - gsl_spline_eval(splinehc, trecv, accelhc);
  return factorplus*dhplus + factorcross*dhcross;
}
/* Time-domain y23 observable (link 2->3) from hplus/hcross splines, including
 * both the orbital (k.R) delay and the constellation delays. Uses the n1 arm
 * projections and the spacecraft positions p2 (emitter) and p3 (receiver). */
double y23TD(
const LISAconstellation *variant, /* Description of LISA variant */
gsl_spline* splinehp, /* Input spline for TD hplus */
gsl_spline* splinehc, /* Input spline for TD hcross */
gsl_interp_accel* accelhp, /* Accelerator for hp spline */
gsl_interp_accel* accelhc, /* Accelerator for hc spline */
const double t) /* Time */
{
/* Precompute array of sine/cosine of harmonics 1..4 of the orbital phase */
double phase=variant->ConstOmega*t + variant->ConstPhi0;
for(int j=0; j<4; j++) {
cosarray[j] = cos((j+1) * phase);
sinarray[j] = sin((j+1) * phase);
}
/* Projections n1.H.n1 for plus and cross (4-harmonic Fourier series) */
double n1Pn1plus = coeffn1Hn1plusconst;
double n1Pn1cross = coeffn1Hn1crossconst;
for(int j=0; j<4; j++) {
n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j];
n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j];
}
/* Scalar products with k (2-harmonic Fourier series) */
double kn1 = coeffkn1const;
double kp2 = coeffkp2const;
double kp3 = coeffkp3const;
double kR = coeffkRconst;
for(int j=0; j<2; j++) {
kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j];
kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j];
kp3 += cosarray[j] * coeffkp3cos[j] + sinarray[j] * coeffkp3sin[j];
kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j];
}
/* Geometric prefactors (note the -k.n1 sign for this link orientation) and the
 * two retarded times combining orbital and constellation delays */
double factorp = (1./(1.-kn1)) * 0.5*n1Pn1plus;
double factorc = (1./(1.-kn1)) * 0.5*n1Pn1cross;
double firstdelay = -(kR*variant->OrbitR + (kp2 + 1)*variant->ConstL)/C_SI;
double seconddelay = -(kR*variant->OrbitR + kp3*variant->ConstL)/C_SI;
/* Result: projected difference of the delayed polarizations */
double y23 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc));
return y23;
}
/* Single-link response y32 in the time domain.
 * Same structure as the other yAB functions; uses the n1 projection
 * coefficients with the opposite kn1 sign convention from y23TD. */
double y32TD(
    const LISAconstellation *variant, /* Description of LISA variant */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    const double t)                   /* Time */
{
    /* Harmonics of the constellation phase, stored in the shared workspace arrays */
    double phi = variant->ConstOmega*t + variant->ConstPhi0;
    for(int k=0; k<4; k++) {
        cosarray[k] = cos((k+1) * phi);
        sinarray[k] = sin((k+1) * phi);
    }
    /* Accumulate harmonic expansions: projections over 4 harmonics,
     * k scalar products over the first 2 */
    double proj_plus = coeffn1Hn1plusconst;
    double proj_cross = coeffn1Hn1crossconst;
    double kn1 = coeffkn1const;
    double kp2 = coeffkp2const;
    double kp3 = coeffkp3const;
    double kR = coeffkRconst;
    for(int k=0; k<4; k++) {
        proj_plus += cosarray[k] * coeffn1Hn1pluscos[k] + sinarray[k] * coeffn1Hn1plussin[k];
        proj_cross += cosarray[k] * coeffn1Hn1crosscos[k] + sinarray[k] * coeffn1Hn1crosssin[k];
        if(k<2) {
            kn1 += cosarray[k] * coeffkn1cos[k] + sinarray[k] * coeffkn1sin[k];
            kp2 += cosarray[k] * coeffkp2cos[k] + sinarray[k] * coeffkp2sin[k];
            kp3 += cosarray[k] * coeffkp3cos[k] + sinarray[k] * coeffkp3sin[k];
            kR += cosarray[k] * coeffkRcos[k] + sinarray[k] * coeffkRsin[k];
        }
    }
    /* Common prefactors and the two delays entering this link */
    double inv = 1./(1.+kn1);
    double factorp = inv * 0.5*proj_plus;
    double factorc = inv * 0.5*proj_cross;
    double delay1 = -(kR*variant->OrbitR + (kp3 + 1)*variant->ConstL)/C_SI;
    double delay2 = -(kR*variant->OrbitR + kp2*variant->ConstL)/C_SI;
    /* Response: weighted difference of the delayed polarizations */
    double hp1 = gsl_spline_eval(splinehp, t+delay1, accelhp);
    double hp2 = gsl_spline_eval(splinehp, t+delay2, accelhp);
    double hc1 = gsl_spline_eval(splinehc, t+delay1, accelhc);
    double hc2 = gsl_spline_eval(splinehc, t+delay2, accelhc);
    return factorp*(hp1 - hp2) + factorc*(hc1 - hc2);
}
/* Single-link response y31 in the time domain.
 * Same structure as the other yAB functions; uses the n2 projection
 * coefficients with the 1-kn2 prefactor for this link direction. */
double y31TD(
    const LISAconstellation *variant, /* Description of LISA variant */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    const double t)                   /* Time */
{
    /* Harmonics of the constellation phase, stored in the shared workspace arrays */
    double phi = variant->ConstOmega*t + variant->ConstPhi0;
    for(int k=0; k<4; k++) {
        cosarray[k] = cos((k+1) * phi);
        sinarray[k] = sin((k+1) * phi);
    }
    /* Accumulate harmonic expansions: projections over 4 harmonics,
     * k scalar products over the first 2 */
    double proj_plus = coeffn2Hn2plusconst;
    double proj_cross = coeffn2Hn2crossconst;
    double kn2 = coeffkn2const;
    double kp3 = coeffkp3const;
    double kp1 = coeffkp1const;
    double kR = coeffkRconst;
    for(int k=0; k<4; k++) {
        proj_plus += cosarray[k] * coeffn2Hn2pluscos[k] + sinarray[k] * coeffn2Hn2plussin[k];
        proj_cross += cosarray[k] * coeffn2Hn2crosscos[k] + sinarray[k] * coeffn2Hn2crosssin[k];
        if(k<2) {
            kn2 += cosarray[k] * coeffkn2cos[k] + sinarray[k] * coeffkn2sin[k];
            kp3 += cosarray[k] * coeffkp3cos[k] + sinarray[k] * coeffkp3sin[k];
            kp1 += cosarray[k] * coeffkp1cos[k] + sinarray[k] * coeffkp1sin[k];
            kR += cosarray[k] * coeffkRcos[k] + sinarray[k] * coeffkRsin[k];
        }
    }
    /* Common prefactors and the two delays entering this link */
    double inv = 1./(1.-kn2);
    double factorp = inv * 0.5*proj_plus;
    double factorc = inv * 0.5*proj_cross;
    double delay1 = -(kR*variant->OrbitR + (kp3 + 1)*variant->ConstL)/C_SI;
    double delay2 = -(kR*variant->OrbitR + kp1*variant->ConstL)/C_SI;
    /* Response: weighted difference of the delayed polarizations */
    double hp1 = gsl_spline_eval(splinehp, t+delay1, accelhp);
    double hp2 = gsl_spline_eval(splinehp, t+delay2, accelhp);
    double hc1 = gsl_spline_eval(splinehc, t+delay1, accelhc);
    double hc2 = gsl_spline_eval(splinehc, t+delay2, accelhc);
    return factorp*(hp1 - hp2) + factorc*(hc1 - hc2);
}
/* Single-link response y13 in the time domain.
 * Same structure as the other yAB functions; uses the n2 projection
 * coefficients with the 1+kn2 prefactor for this link direction. */
double y13TD(
    const LISAconstellation *variant, /* Description of LISA variant */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    const double t)                   /* Time */
{
    /* Harmonics of the constellation phase, stored in the shared workspace arrays */
    double phi = variant->ConstOmega*t + variant->ConstPhi0;
    for(int k=0; k<4; k++) {
        cosarray[k] = cos((k+1) * phi);
        sinarray[k] = sin((k+1) * phi);
    }
    /* Accumulate harmonic expansions: projections over 4 harmonics,
     * k scalar products over the first 2 */
    double proj_plus = coeffn2Hn2plusconst;
    double proj_cross = coeffn2Hn2crossconst;
    double kn2 = coeffkn2const;
    double kp3 = coeffkp3const;
    double kp1 = coeffkp1const;
    double kR = coeffkRconst;
    for(int k=0; k<4; k++) {
        proj_plus += cosarray[k] * coeffn2Hn2pluscos[k] + sinarray[k] * coeffn2Hn2plussin[k];
        proj_cross += cosarray[k] * coeffn2Hn2crosscos[k] + sinarray[k] * coeffn2Hn2crosssin[k];
        if(k<2) {
            kn2 += cosarray[k] * coeffkn2cos[k] + sinarray[k] * coeffkn2sin[k];
            kp3 += cosarray[k] * coeffkp3cos[k] + sinarray[k] * coeffkp3sin[k];
            kp1 += cosarray[k] * coeffkp1cos[k] + sinarray[k] * coeffkp1sin[k];
            kR += cosarray[k] * coeffkRcos[k] + sinarray[k] * coeffkRsin[k];
        }
    }
    /* Common prefactors and the two delays entering this link */
    double inv = 1./(1.+kn2);
    double factorp = inv * 0.5*proj_plus;
    double factorc = inv * 0.5*proj_cross;
    double delay1 = -(kR*variant->OrbitR + (kp1 + 1)*variant->ConstL)/C_SI;
    double delay2 = -(kR*variant->OrbitR + kp3*variant->ConstL)/C_SI;
    /* Response: weighted difference of the delayed polarizations */
    double hp1 = gsl_spline_eval(splinehp, t+delay1, accelhp);
    double hp2 = gsl_spline_eval(splinehp, t+delay2, accelhp);
    double hc1 = gsl_spline_eval(splinehc, t+delay1, accelhc);
    double hc2 = gsl_spline_eval(splinehc, t+delay2, accelhc);
    return factorp*(hp1 - hp2) + factorc*(hc1 - hc2);
}
/**/
/* Evaluate the first-generation TDI observables X, Y, Z at time t, combining
 * the single-link responses yAB at delays of 0, 1, 2 and 3 arm light-travel
 * times (rigid, equal-arm approximation: constant armdelay). */
int EvaluateTDIXYZTD(
    const LISAconstellation *variant, /* Description of LISA variant */
    double* TDIX,                     /* Output: value of TDI observable X */
    double* TDIY,                     /* Output: value of TDI observable Y */
    double* TDIZ,                     /* Output: value of TDI observable Z */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    const double t)                   /* Time */
{
    double armdelay = variant->ConstL/C_SI;
    /* Times retarded by 1, 2 and 3 arm delays */
    double td1 = t - armdelay;
    double td2 = t - 2*armdelay;
    double td3 = t - 3*armdelay;
    *TDIX = (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          + (y21TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
          - (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          - (y31TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    *TDIY = (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          + (y32TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
          - (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          - (y12TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    *TDIZ = (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          + (y13TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
          - (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
          - (y23TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    return SUCCESS;
}
/**/
/* Evaluate the TDI observables A, E, T (orthogonal combinations built from
 * X, Y, Z) at time t. X, Y, Z are formed exactly as in EvaluateTDIXYZTD. */
int EvaluateTDIAETXYZTD(
    const LISAconstellation *variant, /* Description of LISA variant */
    double* TDIA,                     /* Output: value of TDI observable A */
    double* TDIE,                     /* Output: value of TDI observable E */
    double* TDIT,                     /* Output: value of TDI observable T */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    const double t)                   /* Time */
{
    double armdelay = variant->ConstL/C_SI;
    /* Times retarded by 1, 2 and 3 arm delays */
    double td1 = t - armdelay;
    double td2 = t - 2*armdelay;
    double td3 = t - 3*armdelay;
    double X = (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             + (y21TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
             - (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             - (y31TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    double Y = (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             + (y32TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
             - (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             - (y12TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    double Z = (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             + (y13TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, td3))
             - (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, td1))
             - (y23TD(variant, splinehp, splinehc, accelhp, accelhc, td2) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, td3));
    /* Orthogonal combinations */
    *TDIA = 1./(2*sqrt(2)) * (Z-X);
    *TDIE = 1./(2*sqrt(6)) * (X-2*Y+Z);
    *TDIT = 1./(2*sqrt(3)) * (X+Y+Z);
    return SUCCESS;
}
/**/
/* Generate three real TDI time series from TD hplus/hcross splines.
 * Samples inside the nbptmargin window at both ends are left at 0 to avoid
 * evaluating the splines with delays outside their domain.
 * Supported tags: y12 (basic observable, channels 2/3 zeroed), TDIXYZ, TDIAETXYZ.
 * Returns SUCCESS; an unrecognized tag is reported on stderr.
 * NOTE(review): the error path still returns SUCCESS so callers cannot detect
 * the failure programmatically -- returning an error code would change the
 * interface contract, so it is only flagged here. */
int GenerateTDITD3Chanhphc(
    const LISAconstellation *variant, /* Description of LISA variant */
    RealTimeSeries** TDI1,            /* Output: real time series for TDI channel 1 */
    RealTimeSeries** TDI2,            /* Output: real time series for TDI channel 2 */
    RealTimeSeries** TDI3,            /* Output: real time series for TDI channel 3 */
    gsl_spline* splinehp,             /* Input spline for TD hplus */
    gsl_spline* splinehc,             /* Input spline for TD hcross */
    gsl_interp_accel* accelhp,        /* Accelerator for hp spline */
    gsl_interp_accel* accelhc,        /* Accelerator for hc spline */
    gsl_vector* times,                /* Vector of times to evaluate */
    int nbptmargin,                   /* Margin set to 0 on both side to avoid problems with delays out of the domain */
    TDItag tditag)                    /* Tag selecting the TDI observables */
{
    /* Initialize output: copy the time grid, zero the data (margins stay 0) */
    int nbpt = times->size;
    RealTimeSeries_Init(TDI1, nbpt);
    RealTimeSeries_Init(TDI2, nbpt);
    RealTimeSeries_Init(TDI3, nbpt);
    gsl_vector_memcpy((*TDI1)->times, times);
    gsl_vector_memcpy((*TDI2)->times, times);
    gsl_vector_memcpy((*TDI3)->times, times);
    gsl_vector_set_zero((*TDI1)->h);
    gsl_vector_set_zero((*TDI2)->h);
    gsl_vector_set_zero((*TDI3)->h);
    /* Raw data pointers -- assumes unit-stride gsl_vectors, as allocated above */
    double t;
    double* tval = times->data;
    double* tdi1 = (*TDI1)->h->data;
    double* tdi2 = (*TDI2)->h->data;
    double* tdi3 = (*TDI3)->h->data;
    double tdi1val = 0, tdi2val = 0, tdi3val = 0;
    /* For testing purposes: basic observable yAB */
    if(tditag==y12) {
        for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
            t = tval[i];
            tdi1[i] = y12TD(variant, splinehp, splinehc, accelhp, accelhc, t);
            tdi2[i] = 0.;
            tdi3[i] = 0.;
        }
    }
    else if(tditag==TDIXYZ) {
        for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
            t = tval[i];
            EvaluateTDIXYZTD(variant, &tdi1val, &tdi2val, &tdi3val, splinehp, splinehc, accelhp, accelhc, t);
            tdi1[i] = tdi1val;
            tdi2[i] = tdi2val;
            tdi3[i] = tdi3val;
        }
    }
    else if(tditag==TDIAETXYZ) {
        for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
            t = tval[i];
            EvaluateTDIAETXYZTD(variant, &tdi1val, &tdi2val, &tdi3val, splinehp, splinehc, accelhp, accelhc, t);
            tdi1[i] = tdi1val;
            tdi2[i] = tdi2val;
            tdi3[i] = tdi3val;
        }
    }
    else {
        /* Fixed: message previously named the wrong function and went to stdout */
        fprintf(stderr, "Error: in GenerateTDITD3Chanhphc, TDI tag not recognized.\n");
    }
    return SUCCESS;
}
/* Generate hO orbital-delayed for one mode contribution from amp, phase */
/* Generate the orbital-delayed h22 mode as an amp/phase time series,
 * evaluating hOTDAmpPhase at each requested time sample.
 * Samples inside the nbptmargin window at both ends stay 0. */
int Generateh22TDO(
    const LISAconstellation *variant, /* Description of LISA variant */
    AmpPhaseTimeSeries** h22tdO,      /* Output: amp/phase time series for h22TDO */
    gsl_spline* splineamp,            /* Input spline for TD mode amplitude */
    gsl_spline* splinephase,          /* Input spline for TD mode phase */
    gsl_interp_accel* accelamp,       /* Accelerator for amp spline */
    gsl_interp_accel* accelphase,     /* Accelerator for phase spline */
    gsl_vector* times,                /* Vector of times to evaluate */
    int nbptmargin)                   /* Margin set to 0 on both side to avoid problems with delays out of the domain */
{
    /* Allocate output, copy time grid, zero the data */
    int nbpt = times->size;
    AmpPhaseTimeSeries_Init(h22tdO, nbpt);
    gsl_vector_memcpy((*h22tdO)->times, times);
    gsl_vector_set_zero((*h22tdO)->h_amp);
    gsl_vector_set_zero((*h22tdO)->h_phase);
    /* Raw data pointers -- assumes unit-stride gsl_vectors */
    double* tsamples = times->data;
    double* ampout = (*h22tdO)->h_amp->data;
    double* phaseout = (*h22tdO)->h_phase->data;
    /* Evaluate the delayed amp/phase inside the margins */
    for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
        hOTDAmpPhase(variant, &(ampout[i]), &(phaseout[i]), splineamp, splinephase, accelamp, accelphase, tsamples[i]);
    }
    return SUCCESS;
}
/* Generate y12L from orbital-delayed h22 in amp/phase form */
/* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */
/* BEWARE: this ignores the fact that processing through orbital delay breaks the h2-2 = h22* symmetry */
/* Generate y12L from orbital-delayed h22 in amp/phase form.
 * Note: includes both h22 and h2m2 contributions, assuming planar orbits so
 * that h2-2 = h22*. BEWARE: this ignores the fact that processing through
 * orbital delay breaks the h2-2 = h22* symmetry. */
int Generatey12LTD(
    const LISAconstellation *variant, /* Description of LISA variant */
    RealTimeSeries** y12Ltd,          /* Output: real time series for y12L */
    gsl_spline* splineamp,            /* Input spline for h22 TD amplitude */
    gsl_spline* splinephase,          /* Input spline for h22 TD phase */
    gsl_interp_accel* accelamp,       /* Accelerator for h22 amp spline */
    gsl_interp_accel* accelphase,     /* Accelerator for h22 phase spline */
    gsl_vector* times,                /* Vector of times to evaluate */
    double Theta,                     /* Inclination */
    double Phi,                       /* Phase */
    int nbptmargin)                   /* Margin set to 0 on both side to avoid problems with delays out of the domain */
{
    /* Allocate output, copy time grid, zero the data (margins stay 0) */
    int nbpt = times->size;
    RealTimeSeries_Init(y12Ltd, nbpt);
    gsl_vector_memcpy((*y12Ltd)->times, times);
    gsl_vector_set_zero((*y12Ltd)->h);
    /* Spin-weighted spherical harmonics for the (2,2) and (2,-2) modes */
    double complex Y22 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, 2);
    double complex Y2m2 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, -2);
    /* Raw data pointers -- assumes unit-stride gsl_vectors */
    double* tsamples = times->data;
    double* out = (*y12Ltd)->h->data;
    /* Evaluate inside the margins */
    for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
        out[i] = y12LTDfromh22AmpPhase(variant, splineamp, splinephase, accelamp, accelphase, Y22, Y2m2, tsamples[i]);
    }
    return SUCCESS;
}
/* Generate y12 from original h22 in amp/phase form, including both */
/* Here no approximation made as to the decomposition of the response in two steps, all the response is evaluated at once */
/* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */
/* Generate y12 from the original h22 in amp/phase form.
 * Here no approximation is made decomposing the response in two steps: the
 * full response is evaluated at once. Includes both h22 and h2m2
 * contributions, assuming planar orbits so that h2-2 = h22*. */
int Generatey12TD(
    const LISAconstellation *variant, /* Description of LISA variant */
    RealTimeSeries** y12td,           /* Output: real time series for y12 */
    gsl_spline* splineamp,            /* Input spline for h22 TD amplitude */
    gsl_spline* splinephase,          /* Input spline for h22 TD phase */
    gsl_interp_accel* accelamp,       /* Accelerator for h22 amp spline */
    gsl_interp_accel* accelphase,     /* Accelerator for h22 phase spline */
    gsl_vector* times,                /* Vector of times to evaluate */
    double Theta,                     /* Inclination */
    double Phi,                       /* Phase */
    int nbptmargin)                   /* Margin set to 0 on both side to avoid problems with delays out of the domain */
{
    /* Allocate output, copy time grid, zero the data (margins stay 0) */
    int nbpt = times->size;
    RealTimeSeries_Init(y12td, nbpt);
    gsl_vector_memcpy((*y12td)->times, times);
    gsl_vector_set_zero((*y12td)->h);
    /* Spin-weighted spherical harmonics for the (2,2) and (2,-2) modes */
    double complex Y22 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, 2);
    double complex Y2m2 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, -2);
    /* Raw data pointers -- assumes unit-stride gsl_vectors */
    double* tsamples = times->data;
    double* out = (*y12td)->h->data;
    /* Evaluate inside the margins */
    for(int i=nbptmargin; i<nbpt-nbptmargin; i++) {
        out[i] = y12TDfromh22AmpPhase(variant, splineamp, splinephase, accelamp, accelphase, Y22, Y2m2, tsamples[i]);
    }
    return SUCCESS;
}
|
multisort-omp-depend.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
/* Return the current wall-clock time in microseconds (POSIX gettimeofday). */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
#define BLOCK_SIZE 1024L
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
/* Recursively merge left[] and right[] into result[], spawning OpenMP tasks
 * until the slice is below MIN_MERGE_SIZE*2, then delegating to basicmerge. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
    if (length >= MIN_MERGE_SIZE*2L) {
        /* Recursive decomposition into two half-length merges */
        long half = length/2;
        #pragma omp task
        merge(n, left, right, result, start, half);
        #pragma omp task
        merge(n, left, right, result, start + half, half);
        #pragma omp taskwait
    } else {
        /* Base case: sequential merge */
        basicmerge(n, left, right, result, start, length);
    }
}
/* Parallel mergesort: sorts data[0..n) in place, using tmp[0..n) as scratch.
 * Recursively sorts four quarters as tasks, then merges quarter-pairs into
 * tmp halves, then merges the two halves back into data.
 * Task ordering is expressed with depend clauses: each depend item names the
 * FIRST element of a sub-range (data[0], data[n/4L], ...) as a proxy for the
 * whole range -- correct here because producer and consumer tasks use the
 * same proxy addresses. */
void multisort(long n, T data[n], T tmp[n]) {
if (n >= MIN_SORT_SIZE*4L) {
// Recursive decomposition: sort the four quarters independently
#pragma omp task depend(out: data[0])
multisort(n/4L, &data[0], &tmp[0]);
#pragma omp task depend(out: data[n/4L])
multisort(n/4L, &data[n/4L], &tmp[n/4L]);
#pragma omp task depend(out: data[n/2L])
multisort(n/4L, &data[n/2L], &tmp[n/2L]);
#pragma omp task depend(out: data[3L*n/4L])
multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);
// Merge quarters 0+1 into tmp[0..n/2) once both are sorted
#pragma omp task depend(in: data[0], data[n/4L]) depend(out: tmp[0])
merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
// Merge quarters 2+3 into tmp[n/2..n) once both are sorted
#pragma omp task depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);
// Final merge of the two tmp halves back into data
#pragma omp task depend(in: tmp[0], tmp[n/2L])
merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
// Wait for the whole subtree so the caller sees sorted data
#pragma omp taskwait
} else {
// Base case: sequential sort of a small slice
basicsort(n, data);
}
}
/* Fill data[] with a deterministic pseudo-random-looking sequence seeded by
 * rand(): element 0 comes from rand(), each later element is derived from its
 * predecessor (reduced modulo the global N). */
static void initialize(long length, T data[length]) {
    if (length > 0) {
        data[0] = rand();
        for (long i = 1; i < length; i++)
            data[i] = ((data[i-1]+1) * i * 104723L) % N;
    }
}
/* Zero out the first `length` elements of data[]. */
static void clear(long length, T data[length]) {
    long i = 0;
    while (i < length)
        data[i++] = 0;
}
/* Verify that data[0..n) is sorted in non-decreasing order; prints an error
 * with the number of out-of-order adjacent pairs, stays silent when sorted.
 * Fixed: loop index was `int` while n is `long`, truncating the scan (and
 * risking overflow) for vectors longer than INT_MAX elements. */
void check_sorted(long n, T data[n])
{
    long unsorted = 0;
    for (long i = 1; i < n; i++)
        if (data[i-1] > data[i]) unsorted++;
    if (unsorted > 0)
        printf ("\nERROR: data is NOT properly sorted. There are %ld unordered positions\n\n", unsorted);
}
/* Driver: parse sizes (in units of BLOCK_SIZE), initialize the input vector,
 * run the task-parallel multisort, and verify the result.
 * Fixed: allocation results were unchecked, size arguments were not
 * validated (atol returns 0 on bad input), and data/tmp were never freed. */
int main(int argc, char **argv) {
    if (argc != 4) {
        fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]);
        return 1;
    }
    N = atol(argv[1]) * BLOCK_SIZE;
    MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE;
    MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE;
    /* atol yields 0 (or negative) on malformed input; reject early */
    if (N <= 0 || MIN_SORT_SIZE <= 0 || MIN_MERGE_SIZE <= 0) {
        fprintf(stderr, "Error: all sizes must be positive integers\n");
        return 1;
    }
    T *data = malloc(N*sizeof(T));
    T *tmp = malloc(N*sizeof(T));
    if (data == NULL || tmp == NULL) {
        fprintf(stderr, "Error: could not allocate %ld elements\n", N);
        free(data);
        free(tmp);
        return 1;
    }
    double stamp;
    START_COUNT_TIME;
    initialize(N, data);
    clear(N, tmp);
    STOP_COUNT_TIME("Initialization time in seconds");
    START_COUNT_TIME;
    /* One thread enters the sort; tasks fan out to the team */
    #pragma omp parallel
    #pragma omp single
    multisort(N, data, tmp);
    STOP_COUNT_TIME("Multisort execution time");
    START_COUNT_TIME;
    check_sorted (N, data);
    STOP_COUNT_TIME("Check sorted data execution time");
    free(tmp);
    free(data);
    fprintf(stdout, "Multisort program finished\n");
    return 0;
}
|
ep_single.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
//#include "npbparams.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'B'
#endif
#if CLASS == 'S'
/* CLASS = S */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB
c in this directory. Do not modify it by hand.
*/
#define CLASS 'S'
#define M 24
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'W'
/* CLASS = W */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB
c in this directory. Do not modify it by hand.
*/
#define CLASS 'W'
#define M 25
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'A'
/* CLASS = A */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB
c in this directory. Do not modify it by hand.
*/
#define CLASS 'A'
#define M 28
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'B'
/* CLASS = B */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB
c in this directory. Do not modify it by hand.
*/
#define CLASS 'B'
#define M 30
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'C'
/* CLASS = C */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB
c in this directory. Do not modify it by hand.
*/
#define CLASS 'C'
#define M 32
#define CONVERTDOUBLE FALSE
#endif
#define COMPILETIME "28 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
static double x[2*NK];
#pragma omp threadprivate(x)
static double q[NQ];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
/* EP benchmark driver: generates 2^(M+1) pseudo-random numbers in NN batches
 * of 2*NK, converts them to Gaussian pairs by acceptance-rejection, and
 * accumulates the sums sx, sy and the annulus counts q[]. The batch loop is
 * parallelized with an OpenMP for + reduction; x is threadprivate. */
int main(int argc, char **argv) {
double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
double dum[3] = { 1.0, 1.0, 1.0 };
/* NOTE(review): many of these (ierr, node, no_nodes, ierrcode, ...) are
 * MPI-era leftovers and are never used in this OpenMP version */
int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
no_large_nodes, np_add, k_offset, j;
int nthreads = 1;
boolean verified;
char size[13+1]; /* character*13 */
/*
c Because the size of the problem is too large to store in a 32-bit
c integer for some classes, we put it into a string (for printing).
c Have to strip off the decimal point put in there by the floating
c point print statement (internal file)
*/
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - EP Benchmark\n");
sprintf(size, "%12.0f", pow(2.0, M+1));
for (j = 13; j >= 1; j--) {
if (size[j] == '.') size[j] = ' ';
}
printf(" Number of random numbers generated: %13s\n", size);
verified = FALSE;
/*
c Compute the number of "batches" of random number pairs generated
c per processor. Adjust if the number of processors does not evenly
c divide the total number
*/
np = NN;
/*
c Call the random number generator functions and initialize
c the x-array to reduce the effects of paging on the timings.
c Also, call all mathematical functions that are used. Make
c sure these initializations cannot be eliminated as dead code.
*/
vranlc(0, &(dum[0]), dum[1], &(dum[2]));
dum[0] = randlc(&(dum[1]), dum[2]);
for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
Mops = log(sqrt(fabs(max(1.0, 1.0))));
timer_clear(1);
timer_clear(2);
timer_clear(3);
timer_start(1);
vranlc(0, &t1, A, x);
/* Compute AN = A ^ (2 * NK) (mod 2^46). */
t1 = A;
for ( i = 1; i <= MK+1; i++) {
t2 = randlc(&t1, t1);
}
an = t1;
tt = S;
gc = 0.0;
sx = 0.0;
sy = 0.0;
for ( i = 0; i <= NQ - 1; i++) {
q[i] = 0.0;
}
/*
c Each instance of this loop may be performed independently. We compute
c the k offsets separately to take into account the fact that some nodes
c have more numbers to generate than others
*/
k_offset = -1;
/* copyin(x) replicates the master's initialized x into each thread's
 * threadprivate copy */
#pragma omp parallel copyin(x)
{
/* Thread-private shadows of the scalar work variables */
double t1, t2, t3, t4, x1, x2;
int kk, i, ik, l;
double qq[NQ]; /* private copy of q[0:NQ-1] */
for (i = 0; i < NQ; i++) qq[i] = 0.0;
/* sx, sy combined across threads by the reduction clause */
#pragma omp for reduction(+:sx,sy) schedule(static)
for (k = 1; k <= np; k++) {
kk = k_offset + k;
t1 = S;
t2 = an;
/* Find starting seed t1 for this kk. */
for (i = 1; i <= 100; i++) {
ik = kk / 2;
if (2 * ik != kk) t3 = randlc(&t1, t2);
if (ik == 0) break;
t3 = randlc(&t2, t2);
kk = ik;
}
/* Compute uniform pseudorandom numbers. */
if (TIMERS_ENABLED == TRUE) timer_start(3);
/* NOTE(review): x-1 points one before the array start -- technically UB
 * in C, a long-standing NPB idiom for 1-based indexing inside vranlc */
vranlc(2*NK, &t1, A, x-1);
if (TIMERS_ENABLED == TRUE) timer_stop(3);
/*
c Compute Gaussian deviates by acceptance-rejection method and
c tally counts in concentric square annuli. This loop is not
c vectorizable.
*/
if (TIMERS_ENABLED == TRUE) timer_start(2);
for ( i = 0; i < NK; i++) {
x1 = 2.0 * x[2*i] - 1.0;
x2 = 2.0 * x[2*i+1] - 1.0;
t1 = pow2(x1) + pow2(x2);
if (t1 <= 1.0) {
t2 = sqrt(-2.0 * log(t1) / t1);
t3 = (x1 * t2); /* Xi */
t4 = (x2 * t2); /* Yi */
l = max(fabs(t3), fabs(t4));
qq[l] += 1.0; /* counts */
sx = sx + t3; /* sum of Xi */
sy = sy + t4; /* sum of Yi */
}
}
if (TIMERS_ENABLED == TRUE) timer_stop(2);
}
/* Fold each thread's private counts into the shared q[] */
#pragma omp critical
{
for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
}
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */
/* Total number of accepted Gaussian pairs */
for (i = 0; i <= NQ-1; i++) {
gc = gc + q[i];
}
timer_stop(1);
tm = timer_read(1);
nit = 0;
/* Verify sx/sy against reference values for the compiled class (M) */
if (M == 24) {
if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
(fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 25) {
if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
(fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 28) {
{
if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
(fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
verified = TRUE;
}
printf("Debug: 231, sx is:%f, sy is:%f\n",sx,sy);
}
} else if (M == 30) {
if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
(fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 32) {
if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
(fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
verified = TRUE;
}
}
/* Millions of random numbers per second */
Mops = pow(2.0, M+1)/tm/1000000.0;
printf("EP Benchmark Results: \n"
"CPU Time = %10.4f\n"
"N = 2^%5d\n"
"No. Gaussian Pairs = %15.0f\n"
"Sums = %25.15e %25.15e\n"
"Counts:\n",
tm, M, gc, sx, sy);
for (i = 0; i <= NQ-1; i++) {
printf("%3d %15.0f\n", i, q[i]);
}
c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
tm, Mops,
"Random numbers generated",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE) {
printf("Total time: %f", timer_read(1));
printf("Gaussian pairs: %f", timer_read(2));
printf("Random numbers: %f", timer_read(3));
}
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
/* Print the standard NPB results banner: problem size, iteration count,
 * timing, Mop/s, verification status, and the compile-time configuration
 * strings. Output goes to stdout; no return value. */
void c_print_results( char *name,
char cclass,
int n1,
int n2,
int n3,
int niter,
int nthreads,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags,
char *rand)
{
/* Only read when SMP is defined (overwritten from the environment below) */
char *evalue="1000";
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", cclass );
/* n2 == n3 == 0 signals a 1-D problem size (as in IS) */
if( n2 == 0 && n3 == 0 )
printf( " Size = %12d\n", n1 ); /* as in IS */
else
printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Threads = %12d\n", nthreads );
printf( " Time in seconds = %12.2f\n", t );
printf( " Mop/s total = %12.2f\n", mops );
printf( " Operation type = %24s\n", optype);
if( passed_verification )
printf( " Verification = SUCCESSFUL\n" );
else
printf( " Verification = UNSUCCESSFUL\n" );
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( " RAND = %s\n", rand );
#ifdef SMP
/* NOTE(review): getenv may return NULL, which %s would pass unchecked */
evalue = getenv("MP_SET_NUMTHREADS");
printf( " MULTICPUS = %s\n", evalue );
#endif
/* printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: npb@nas.nasa.gov\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );*/
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Return the current wall-clock reading, in seconds, as reported by
   wtime().  Thin convenience wrapper used by the timer_* routines. */
double elapsed_time( void )
{
    double seconds = 0.0;
    wtime( &seconds );
    return seconds;
}
/* Timer state shared by the timer_* routines: start[i] holds the last
   timer_start() reading for timer i, elapsed[i] its accumulated total
   in seconds.  Up to 64 independent timers. */
double start[64], elapsed[64];
/*****************************************************************/
/******            T  I  M  E  R  _  C  L  E  A  R          ******/
/*****************************************************************/
/* Reset the accumulated total of timer n to zero. */
void timer_clear( int n )
{
    elapsed[n] = 0.0;
}
/*****************************************************************/
/******            T  I  M  E  R  _  S  T  A  R  T          ******/
/*****************************************************************/
/* Record the current wall-clock time as the start point of timer n.
   Does not reset the accumulated total; see timer_clear(). */
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/******            T  I  M  E  R  _  S  T  O  P             ******/
/*****************************************************************/
/* Add to timer n the wall-clock time elapsed since its most recent
   timer_start() call. */
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/******            T  I  M  E  R  _  R  E  A  D             ******/
/*****************************************************************/
/* Return the total accumulated seconds of timer n. */
double timer_read( int n )
{
    return elapsed[n];
}
/*
 * Store in *t the wall-clock time in seconds, measured relative to the
 * first call to this function (so the first call yields only the
 * sub-second fraction of the current time).
 *
 * BUG FIX: the baseline second count was kept in a `static int`, which
 * silently truncates tv_sec where time_t is wider than int (Y2038
 * hazard).  The baseline is now a time_t, with a separate flag marking
 * "not yet initialized" instead of the -1 sentinel.
 */
void wtime(double *t)
{
    static int initialized = 0;
    static time_t base_sec;
    struct timeval tv;
    gettimeofday(&tv, (void *)0);
    if (!initialized) {
        base_sec = tv.tv_sec;
        initialized = 1;
    }
    *t = (double)(tv.tv_sec - base_sec) + 1.0e-6 * (double)tv.tv_usec;
}
// common/c_randdp.c
/* Powers of two used to split 46-bit operands into 23-bit halves:
   r23 = 2^-23, t23 = 2^23, r46 = 2^-46, t46 = 2^46. */
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif
/*
 * randlc: linear congruential pseudorandom number generator
 *
 *     x_{k+1} = a * x_k  (mod 2^46)
 *
 * *x is the current seed and a the multiplier; both must be odd
 * double-precision integers in (1, 2^46).  *x is updated in place to
 * the next seed, and the return value is that seed scaled by 2^-46,
 * i.e. a uniform deviate in (0, 1).  The 46-bit modular product is
 * carried out exactly in double precision by splitting each operand
 * into two 23-bit halves, so results are bit-identical on any machine
 * with at least 48 mantissa bits.  (Based on David H. Bailey's NPB
 * generator, October 26, 1990.)
 */
double randlc (double *x, double a) {
    double ahi, alo;   /* a = 2^23 * ahi + alo */
    double xhi, xlo;   /* x = 2^23 * xhi + xlo */
    double prod, carry, z, full, top;

    /* Split the multiplier: a = 2^23 * ahi + alo. */
    prod = r23 * a;
    ahi  = (int)prod;
    alo  = a - t23 * ahi;

    /* Split the seed the same way. */
    prod = r23 * (*x);
    xhi  = (int)prod;
    xlo  = (*x) - t23 * xhi;

    /* z = ahi*xlo + alo*xhi (mod 2^23), then
       new x = 2^23 * z + alo*xlo (mod 2^46). */
    prod  = ahi * xlo + alo * xhi;
    carry = (int)(r23 * prod);
    z     = prod - t23 * carry;
    full  = t23 * z + alo * xlo;
    top   = (int)(r46 * full);
    (*x)  = full - t46 * top;

    return (r46 * (*x));
}
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*
 * vranlc: vector version of randlc().
 *
 * Generates n uniform pseudorandom doubles in (0, 1) using the same
 * linear congruential generator, x_{k+1} = a*x_k (mod 2^46), storing
 * the results in y and updating *x_seed to the new seed so subsequent
 * calls continue the sequence.  If n is zero, nothing is generated.
 * Produces identical results on any machine with at least 48 mantissa
 * bits in double precision.
 */
void vranlc (int n, double *x_seed, double a, double* y) {
/* void vranlc (int n, double *x_seed, double a, double y[]) { */
int i;
double x,t1,t2,t3,t4,a1,a2,x1,x2,z;
/* Break A into two 23-bit parts: A = 2^23 * A1 + A2. */
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
x = *x_seed;
/* NOTE(review): this loop writes y[1]..y[n] (1-based, matching the
   Fortran original) -- y[0] is never written, so the caller must size
   y for n+1 elements or pass y-1.  Verify against call sites before
   changing. */
for (i = 1; i <= n; i++) {
/* Break X into two parts: X = 2^23 * X1 + X2, compute
   Z = A1*X2 + A2*X1 (mod 2^23), then X = 2^23*Z + A2*X2 (mod 2^46). */
t1 = r23 * x;
x1 = (int)t1;
x2 = x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
x = t3 - t46 * t4;
y[i] = r46 * x;
}
*x_seed = x;
}
|
GB_binop__bclr_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int8)
// C=scalar+B GB (_bind1st__bclr_int8)
// C=scalar+B' GB (_bind1st_tran__bclr_int8)
// C=A+scalar GB (_bind2nd__bclr_int8)
// C=A'+scalar GB (_bind2nd_tran__bclr_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A, and B are all dense; no mask, no accumulator.
   The loop body lives in GB_dense_ewise3_noaccum_template.c,
   specialized by the GB_* macros above for BITCLR on int8. */
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
/* this kernel is compiled out (see GB_DISABLE); caller uses the
   generic worker instead */
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B where C is dense and B is sparse.  B_ek_slicing partitions
   B's entries into B_ntasks tasks across B_nthreads threads; the work
   itself is in GB_dense_subassign_23_template.c. */
GrB_Info GB (_Cdense_accumB__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar (passed type-erased via p_bwork) into a
   dense matrix.  The loop is in GB_dense_subassign_22_template.c. */
GrB_Info GB (_Cdense_accumb__bclr_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* NOTE(review): unreachable -- the block above always returns first.
   Harmless artifact of the code generator. */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B (optionally masked), with C built in the sparsity
   format given by C_sparsity.  The numeric work is in
   GB_add_template.c, specialized by the GB_* macros above. */
GrB_Info GB (_AaddB__bclr_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
/* workspaces for slicing M, A, and B; released by GB_FREE_WORK below */
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
/* eWiseMult: C = A.*B or C<M> = A.*B; the specific method variant is
   selected by ewise_method.  All work is in GB_emult_01_meta.c. */
GrB_Info GB (_AemultB_01__bclr_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
   GB_BINOP_FLIP is 1 for BITCLR (it is not commutative), so flipxy
   selects between the f(x,y) and f(y,x) instantiations of
   GB_emult_02_template.c via the GB_FLIPPED macro. */
GrB_Info GB (_AemultB_02__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
   bitmap/full.  Work is in GB_emult_03_template.c; M_ek_slicing
   partitions the mask's entries across M_ntasks tasks. */
GrB_Info GB (_AemultB_03__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
   Work is in GB_bitmap_emult_template.c. */
GrB_Info GB (_AemultB_bitmap__bclr_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = bitclr (x, Bx [p]) for every entry present in B, binding the
   scalar x as the first operand.  Cx and Bx may be aliased; Bb is B's
   bitmap (or NULL when all entries are present, per GBB). */
GrB_Info GB (_bind1st__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (Bx, p, false) ;
            Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = bitclr (Ax [p], y) for every entry present in A, binding the
   scalar y as the second operand.  Cx and Ax may be aliased; Ab is A's
   bitmap (or NULL when all entries are present, per GBB). */
GrB_Info GB (_bind2nd__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (Ax, p, false) ;
            Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ; \
}
/* C = op (x, A'): transpose A and apply the operator with the scalar x
   bound as the first operand; the per-entry work is GB_CAST_OP above,
   invoked from GB_unop_transpose.c. */
GrB_Info GB (_bind1st_tran__bclr_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
/* restore GB_ATYPE for any following code (same definition here, since
   A's type and the operator's y operand are both int8_t) */
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ; \
}
/* C = op (A', y): transpose A and apply the operator with the scalar y
   bound as the second operand; the per-entry work is GB_CAST_OP above,
   invoked from GB_unop_transpose.c. */
GrB_Info GB (_bind2nd_tran__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB051-getthreadnum-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
omp_get_thread_num() is used to ensure serial semantics.
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
/* DataRaceBench DRB051: race-free by construction.  Only the thread
   whose omp_get_thread_num() is 0 writes numThreads, so there is no
   conflicting write inside the parallel region, and the printf read
   happens after the region's implicit barrier. */
int main()
{
/* presumably instrumentation hooks from omprace.h -- verify there */
omprace_init();
int numThreads=0 ;
#pragma omp parallel
{
/* serial semantics: exactly one thread takes this branch */
if ( omp_get_thread_num()==0 ) {
numThreads = omp_get_num_threads();
}
}
printf ("numThreads=%d\n", numThreads);
omprace_fini();
return 0;
}
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Naive 3x3, stride-1 convolution on int8 input, accumulating int32
   output.  Output channels are computed independently (parallel over p);
   for each input channel the 3x3 kernel is applied by walking three
   consecutive input rows in lockstep. */
static void conv3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output channel, input channel) pair
        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    int sum0 = 0;

                    // top row of the 3x3 window
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    // middle row
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    // bottom row
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;
                    outptr0++;

                    r0++;
                    r1++;
                    r2++;
                }

                // the window extends 2 pixels past outw; skip them
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}
/* Transform each 3x3 int8 kernel into its 4x4 Winograd F(2,3) domain
   form, stored as 16 shorts per (output channel, input channel) pair:
   two passes with the ktm table, first over kernel rows, then over the
   columns of the intermediate 4x3 result. */
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(4 * 4, inch, outch, (size_t)2u);

    // G
    const short ktm[4][3] = {
        {2, 0, 0},
        {1, 1, 1},
        {1, -1, 1},
        {0, 0, 2}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // the three rows of the 3x3 kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // first pass: 4x3 intermediate (ktm applied to kernel rows)
            short tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // second pass: 4x4 output U (ktm applied to intermediate rows)
            for (int j = 0; j < 4; j++)
            {
                const short* row = tmp[j];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = row[0] * ktm[i][0] + row[1] * ktm[i][1] + row[2] * ktm[i][2];
                }
            }
        }
    }
}
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4 * 4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q + 1);
const short* k2 = kernel0_tm.row<short>(q + 2);
const short* k3 = kernel0_tm.row<short>(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
int* outRow0 = out.row<int>(j * 2);
int* outRow1 = out.row<int>(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob tm,why right 2,because the G' = G*2
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform 3x3 int8 kernels into the Winograd F(4,3) domain.
// Input `kernel` layout: [outch][inch][9] signed char (row-major 3x3 per pair).
// Output `kernel_tm`: outch channels, row q holds the 36 transformed short
// coefficients (U = G g G^T) for input channel q.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(6 * 6, inch, outch, (size_t)2u);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
// Integer-scaled G: the float matrix above multiplied by 24 so all entries
// are integers. G is applied twice (rows then columns), so the transformed
// kernel carries a 24*24 = 576 scale that the output transform divides out
// (the /576 in conv3x3s1_winograd43_int8_sse).
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 24}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g  (one 3x3 kernel row per ki pointer)
// NOTE(review): accumulating into short can overflow for extreme int8
// kernels (e.g. |tmp| up to 127*24 = 3048, then *24 below exceeds
// SHRT_MAX); presumably quantized kernels stay small enough — confirm.
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T, stored row-major as 36 shorts
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(4,3) 3x3 stride-1 convolution for int8 input, int32 output.
// Pipeline: pad input to 4n+2 -> transform 6x6 input tiles (B^T d B) ->
// per-tile elementwise dot with the pre-transformed kernels -> inverse
// transform 6x6 -> 4x4 (A^T . A) -> crop the padding.
// `kernel_tm` must come from conv3x3s1_winograd43_transform_kernel_int8_sse;
// its 24*24 scale is divided out as /576 after the output transform.
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
// 36 shorts per tile, one row per tile, one channel per input channel
bottom_blob_tm.create(6 * 6, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// 6 overlapping input rows per tile row (tiles advance by 4)
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t  (second pass applies B^T to the columns)
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
for (int n = 0; n < 6; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 6] = d1[n];
out_tm0[n + 12] = d2[n];
out_tm0[n + 18] = d3[n];
out_tm0[n + 24] = d4[n];
out_tm0[n + 30] = d5[n];
}
// tiles overlap by 2 pixels: advance 4 columns per tile
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
out_tm0 += 36;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
// 36 int32 accumulators per tile, one channel per output channel
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[36] = {0};
// elementwise multiply-accumulate across input channels
for (int q = 0; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 36; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 36; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
// each 6x6 tile produces a 4x4 output block
int* outRow0 = out.row<int>(j * 4);
int* outRow1 = out.row<int>(j * 4 + 1);
int* outRow2 = out.row<int>(j * 4 + 2);
int* outRow3 = out.row<int>(j * 4 + 3);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
// /576 undoes the 24*24 kernel-transform scaling.
// NOTE(review): integer division truncates toward zero for negative
// sums (unlike the >>2 arithmetic shift used by the F(2,3) variant).
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const signed char* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
|
double_reduction_minus.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main()
{
// accumulator combined across threads by the OpenMP runtime
double result = 0;
// NOTE: OpenMP defines the '-' reduction with identity 0 and '+' as the
// combiner, so this yields -(0 + 1 + ... + (nthreads-1)) — identical to
// reduction(+:result). The '-' operator is deprecated since OpenMP 5.2.
#pragma omp parallel reduction(-:result)
{
result -= omp_get_thread_num();
}
printf("Result: %f\n", result);
}
|
WaveFunctionComponent.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2020 QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
/**@file WaveFunctionComponent.h
*@brief Declaration of WaveFunctionComponent
*/
namespace qmcplusplus
{
#ifdef QMC_CUDA
// CUDA-only descriptor for one non-local pseudopotential evaluation job.
struct NLjob
{
int walker;        // walker index the job belongs to
int elec;          // electron index being moved
int numQuadPoints; // number of quadrature points to evaluate
NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {}
};
#endif
///forward declaration of WaveFunctionComponent
class WaveFunctionComponent;
///forward declaration of DiffWaveFunctionComponent
class DiffWaveFunctionComponent;
typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;
/**@defgroup WaveFunctionComponent group
* @brief Classes which constitute a many-body trial wave function
*
* A many-body trial wave function is
* \f[
\Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
* \f]
* where \f$\Psi\f$s are represented by
* the derived classes from WaveFunctionComponent.
*/
/** @ingroup WaveFunctionComponent
* @brief An abstract class for a component of a many-body trial wave function
*
* mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects
* which are required to be base class pointers of the same derived class type.
* all the mw_ routines must be implemented in a way either stateless or maintains states of every walker.
*/
struct WaveFunctionComponent : public QMCTraits
{
/** enum for a update mode */
enum
{
ORB_PBYP_RATIO, /*!< particle-by-particle ratio only */
ORB_PBYP_ALL, /*!< particle-by-particle, update Value-Gradient-Laplacian */
ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Grdient */
ORB_WALKER, /*!< walker update */
ORB_ALLWALKER /*!< all walkers update */
};
typedef ParticleAttrib<ValueType> ValueVectorType;
typedef ParticleAttrib<GradType> GradVectorType;
typedef ParticleSet::Walker_t Walker_t;
typedef Walker_t::WFBuffer_t WFBufferType;
typedef Walker_t::Buffer_t BufferType;
typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
// the value type for log(psi)
using LogValueType = std::complex<QTFull::RealType>;
// the value type for psi(r')/psi(r)
using PsiValueType = QTFull::ValueType;
/** flag to set the optimization mode */
bool IsOptimizing;
/** boolean to set optimization
*
* If true, this object is actively modified during optimization
*/
bool Optimizable;
/** true, if this component is fermionic */
bool is_fermionic;
/** current update mode */
int UpdateMode;
/** current \f$\log\phi \f$
*/
LogValueType LogValue;
/** Pointer to the differential WaveFunctionComponent of this object
*
* If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
*/
DiffWaveFunctionComponentPtr dPsi;
/** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$
*/
GradVectorType dLogPsi;
/** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$
*/
ValueVectorType d2LogPsi;
/** Name of the class derived from WaveFunctionComponent
*/
std::string ClassName;
///list of variables this WaveFunctionComponent handles
opt_variables_type myVars;
///Bytes in WFBuffer
size_t Bytes_in_WFBuffer;
/// default constructor
WaveFunctionComponent();
//WaveFunctionComponent(const WaveFunctionComponent& old);
///default destructor
virtual ~WaveFunctionComponent() {}
inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }
///assign a differential WaveFunctionComponent
virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);
///assembles the full value
PsiValueType getValue() const { return LogToValue<PsiValueType>::convert(LogValue); }
/** check in optimizable parameters
* @param active a super set of optimizable variables
*
* Add the paramemters this WaveFunctionComponent manage to active.
*/
virtual void checkInVariables(opt_variables_type& active) = 0;
/** check out optimizable variables
*
* Update myVars index map
*/
virtual void checkOutVariables(const opt_variables_type& active) = 0;
/** reset the parameters during optimizations
*/
virtual void resetParameters(const opt_variables_type& active) = 0;
/** print the state, e.g., optimizables */
virtual void reportStatus(std::ostream& os) = 0;
/** reset properties, e.g., distance tables, for a new target ParticleSet
* @param P ParticleSet
*/
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** evaluate the value of the WaveFunctionComponent from scratch
* @param P active ParticleSet
* @param G Gradients, \f$\nabla\ln\Psi\f$
* @param L Laplacians, \f$\nabla^2\ln\Psi\f$
* @return the log value
*
* Mainly for walker-by-walker move. The initial stage of particle-by-particle
* move also uses this.
*/
virtual LogValueType evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L) = 0;
/** evaluate from scratch the same type WaveFunctionComponent of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
* @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
* @@param values the log WF values of walkers in a batch
*/
virtual void mw_evaluateLog(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
const RefVector<ParticleSet::ParticleGradient_t>& G_list,
const RefVector<ParticleSet::ParticleLaplacian_t>& L_list)
{
// default batched implementation: one independent evaluateLog per walker,
// parallelized over the walker batch
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw].get().evaluateLog(P_list[iw], G_list[iw], L_list[iw]);
}
/** recompute the value of the WaveFunctionComponents which require critical accuracy.
* needed for Slater Determinants but not needed for most types of WaveFunctionComponents
*/
virtual void recompute(ParticleSet& P) {}
// virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
// {
// APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
// }
// Default aborts: components supporting Hessian evaluation must override.
virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
{
APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
}
/** return the current gradient for the iat-th particle
* @param P quantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGrad(ParticleSet& P, int iat)
{
// default aborts; the zero-valued return is unreachable but satisfies the signature
APP_ABORT("WaveFunctionComponent::evalGradient is not implemented in " + ClassName + " class.");
return GradType();
}
/** return the current spin gradient for the iat-th particle
* Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
* @param P quantum particle set
* @param iat particle index
* @return the spin gradient of the iat-th particle
*/
virtual GradType evalGradWithSpin(ParticleSet& P, int iat, ComplexType& spingrad) { return evalGrad(P, iat); }
/** compute the current gradients for the iat-th particle of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_evalGrad(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<GradType>& grad_now)
{
// default batched implementation: per-walker evalGrad; grad_now must be
// pre-sized to the batch size by the caller
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat);
}
/** return the logarithmic gradient for the iat-th particle
* of the source particleset
* @param Pquantum particle set
* @param iat particle index
* @return the gradient of the iat-th particle
*/
virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
{
// unit_test_hamiltonian calls this function incorrectly; do not abort for now
// APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
// default: component has no dependence on the source (ion) positions
return GradType();
}
/** Adds the gradient w.r.t. the iat-th particle of the
* source particleset (ions) of the logarithmic gradient
* and laplacian w.r.t. the target paritlceset (electrons).
* @param P quantum particle set (electrons)
* @param source classical particle set (ions)
* @param iat particle index of source (ion)
* @param the ion gradient of the elctron gradient
* @param the ion gradient of the elctron laplacian.
* @return the log gradient of psi w.r.t. the source particle iat
*/
virtual GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int iat,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
// default: no ion dependence; grad_grad/lapl_grad are left untouched
return GradType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat Gradient for the active particle
*/
virtual PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
// Default aborts: derived classes supporting particle-by-particle moves must override.
APP_ABORT("WaveFunctionComponent::ratioGrad is not implemented in " + ClassName + " class.");
// return the declared type directly (was ValueType(), relying on implicit conversion)
return PsiValueType();
}
/** evaluate the ratio of the new to old WaveFunctionComponent value and the new spin gradient
* Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
* @param P the active ParticleSet
* @param iat the index of a particle
* @param grad_iat realspace gradient for the active particle
* @param spingrad_iat spin gradient for the active particle
*/
virtual PsiValueType ratioGradWithSpin(ParticleSet& P, int iat, GradType& grad_iat, ComplexType& spingrad_iat)
{
// default: no explicit spin dependence, so spingrad_iat is left untouched
return ratioGrad(P, iat, grad_iat);
}
/** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
* @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
*/
virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios,
std::vector<GradType>& grad_new)
{
// default batched implementation: per-walker ratioGrad; ratios/grad_new
// must be pre-sized to the batch size by the caller
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratioGrad(P_list[iw], iat, grad_new[iw]);
}
/** a move for iat-th particle is accepted. Update the current content.
* @param P target ParticleSet
* @param iat index of the particle whose new position was proposed
* @param safe_to_delay if true, delayed accept is safe.
*/
virtual void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false) = 0;
/** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content.
* Note that all the lists only include accepted walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param safe_to_delay if true, delayed accept is safe.
*/
virtual void mw_accept_rejectMove(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
const std::vector<bool>& isAccepted,
bool safe_to_delay = false)
{
// accept or restore each walker's move according to its isAccepted flag
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
if (isAccepted[iw])
WFC_list[iw].get().acceptMove(P_list[iw], iat, safe_to_delay);
else
WFC_list[iw].get().restore(iat);
}
/** complete all the delayed updates, must be called after each substep or step during pbyp move
*/
virtual void completeUpdates() {}
/** complete all the delayed updates for all the walkers in a batch
* must be called after each substep or step during pbyp move
*/
virtual void mw_completeUpdates(const RefVector<WaveFunctionComponent>& WFC_list)
{
// default: flush any delayed updates walker by walker
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw].get().completeUpdates();
}
/** If a move for iat-th particle is rejected, restore to the content.
* @param iat index of the particle whose new position was proposed
*
* Ye: hopefully we can gradually move away from restore
*/
virtual void restore(int iat) = 0;
/** evaluate the ratio of the new to old WaveFunctionComponent value
* @param P the active ParticleSet
* @param iat the index of a particle
* @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$
*
* Specialized for particle-by-particle move
*/
virtual PsiValueType ratio(ParticleSet& P, int iat) = 0;
/** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat particle index
* @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
*/
virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
int iat,
std::vector<PsiValueType>& ratios)
{
// default batched implementation: per-walker ratio(); ratios must be
// pre-sized to the batch size by the caller
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat);
}
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Requests space in the buffer
* based on the data type sizes of the objects in this class.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
*/
virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list,
const std::vector<ParticleSet*>& P_list,
const RefVector<WFBufferType>& buf_list)
{
// We can't make this static but we can use a lambda with no capture to
// restrict access to *this scope
auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) {
wfc.registerData(pset, wfb);
};
// serial loop: buffer registration advances per-walker cursors and is not parallelized
for (int iw = 0; iw < WFC_list.size(); iw++)
registerComponentData(*(WFC_list[iw]), *(P_list[iw]), buf_list[iw]);
}
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param P particle set
* @param buf Anonymous storage
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
* @return log value of the wavefunction.
*/
virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0;
/** For particle-by-particle move. Put the objects of this class
* in the walker buffer or forward the memory cursor.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param buf_list Anonymous storage
* @@param values the log WF values of walkers in a batch
* @param fromscratch request recomputing the precision critical
* pieces of wavefunction from scratch
*/
virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list,
const RefVector<ParticleSet>& P_list,
const RefVector<WFBufferType>& buf_list,
bool fromscratch = false)
{
// default batched implementation: per-walker updateBuffer; returned log
// values are discarded here
#pragma omp parallel for
for (int iw = 0; iw < WFC_list.size(); iw++)
WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch);
}
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* The log value, P.G and P.L contribution from the objects
* of this class are also added.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0;
/** For particle-by-particle move. Copy data or attach memory
* from a walker buffer to the objects of this class.
* @param P particle set
* @param buf Anonymous storage
*/
virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list,
const RefVector<ParticleSet>& p_list,
const RefVector<WFBufferType>& buf_list)
{
// default batched implementation: per-walker copyFromBuffer
#pragma omp parallel for
for (int iw = 0; iw < wfc_list.size(); iw++)
wfc_list[iw].get().copyFromBuffer(p_list[iw], buf_list[iw]);
}
/** make clone
* @param tqp target Quantum ParticleSet
* @param deepcopy if true, make a decopy
*
* If not true, return a proxy class
*/
virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
/** Intended as a handle to break
*
*
*/
//virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0;
/** Return the Chiesa kinetic energy correction
*/
virtual RealType KECorrection();
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters.
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction.
* Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog.
* Also the factor of -1/2 from the kinetic energy must be included here. The 1/m
* factor is applied in TrialWaveFunction.
*/
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi);
/** Compute derivatives of the wavefunction with respect to the optimizable
* parameters
* @param P particle set
* @param optvars optimizable parameters
* @param dlogpsi array of derivatives of the log of the wavefunction
* Note: this function differs from the evaluateDerivatives function in the way that it only computes
* the derivative of the log of the wavefunction.
*/
virtual void evaluateDerivativesWF(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi);
/** Scale parameter derivatives by the real part of this component's value.
 *
 *  The value is reconstructed from the stored LogValue via LogToValue.
 *  Only the slots of dlogpsi owned by this component's optimizable
 *  variables (myVars) are touched.
 *
 *  @param dlogpsi derivatives of log(psi), indexed by global parameter slot
 */
virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi)
{
  // Re(psi) recovered from the cached log representation.
  RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue));
  for (int j = 0; j < myVars.size(); j++)
  {
    // where(j) maps the j-th local variable to its global slot.
    // NOTE(review): where(j) can be negative for inactive parameters in
    // some components — confirm loc >= 0 is guaranteed for every caller.
    int loc = myVars.where(j);
    dlogpsi[loc] *= myrat;
  }
}
/** Calculates the derivatives of \f$ \grad(\textrm{log}(\psif)) \f$ with respect to
the optimizable parameters, and the dot product of this is then
performed with the passed-in G_in gradient vector. This object is then
returned as dgradlogpsi.
*/
virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi)
{
  // Default: hard abort — only components that override this support
  // parameter derivatives of grad(log psi).
  APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n");
}
/// Hook invoked when a parameter-optimization run completes. Default: no-op.
virtual void finalizeOptimization() {}
/** evaluate the ratios of one virtual move with respect to all the particles
* @param P reference particleset
* @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$
*/
virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
*/
virtual void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios);
/** evaluate ratios to evaluate the non-local PP multiple walkers
* @param wfc_list the list of WaveFunctionComponent references of the same component in a walker batch
* @param vp_list the list of VirtualParticleSet references in a walker batch
* @param ratios of all the virtual moves of all the walkers
*/
virtual void mw_evaluateRatios(const RefVector<WaveFunctionComponent>& wfc_list,
const RefVector<const VirtualParticleSet>& vp_list,
std::vector<std::vector<ValueType>>& ratios)
{
#pragma omp parallel for
for (int iw = 0; iw < wfc_list.size(); iw++)
wfc_list[iw].get().evaluateRatios(vp_list[iw], ratios[iw]);
}
/** evaluate ratios to evaluate the non-local PP
* @param VP VirtualParticleSet
* @param ratios ratios with new positions VP.R[k] the VP.refPtcl
* @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$
*/
virtual void evaluateDerivRatios(VirtualParticleSet& VP,
const opt_variables_type& optvars,
std::vector<ValueType>& ratios,
Matrix<ValueType>& dratios);
/////////////////////////////////////////////////////
// Functions for vectorized evaluation and updates //
/////////////////////////////////////////////////////
#ifdef QMC_CUDA
  //-----------------------------------------------------------------------
  // Legacy CUDA batched-walker interface. Every virtual below is a stub:
  // it either does nothing or aborts via APP_ABORT, and components that
  // support the CUDA driver override the subset they implement.
  //-----------------------------------------------------------------------
  using CTS = CUDAGlobalTypes;

  /// Release GPU-side memory held by this component. Default: no-op.
  virtual void freeGPUmem() {}
  /// Recompute cached GPU state for all walkers in W. Default: no-op.
  virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {}
  /// Reserve room in the shared device buffer pool. Default: no-op.
  virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {}

  /** Evaluate the log of the WF for all walkers
   * @param walkers vector of all walkers
   * @param logPsi output vector of log(psi)
   */
  virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /** Evaluate the wave-function ratio w.r.t. moving particle iat
   * for all walkers
   * @param walkers vector of all walkers
   * @param iat particle which is moving
   * @param psi_ratios output vector with psi_new/psi_old
   */
  virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  // Returns the WF ratio and gradient w.r.t. iat for each walker
  // in the respective vectors
  virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Ratio, gradient and Laplacian for all walkers.
  virtual void ratio(MCWalkerConfiguration& W,
                     int iat,
                     std::vector<ValueType>& psi_ratios,
                     std::vector<GradType>& grad,
                     std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Compute (without accumulating) ratio/grad/lapl for a proposed move.
  virtual void calcRatio(MCWalkerConfiguration& W,
                         int iat,
                         std::vector<ValueType>& psi_ratios,
                         std::vector<GradType>& grad,
                         std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Accumulate this component's contribution into ratio/grad/lapl.
  virtual void addRatio(MCWalkerConfiguration& W,
                        int iat,
                        int k,
                        std::vector<ValueType>& psi_ratios,
                        std::vector<GradType>& grad,
                        std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Ratio/grad/lapl for per-walker particle indices and proposed positions.
  virtual void ratio(std::vector<Walker_t*>& walkers,
                     std::vector<int>& iatList,
                     std::vector<PosType>& rNew,
                     std::vector<ValueType>& psi_ratios,
                     std::vector<GradType>& grad,
                     std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Accumulate the gradient w.r.t. particle iat for all walkers.
  virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Compute (without accumulating) the gradient w.r.t. particle iat.
  virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Gradients and Laplacians for all particles of all walkers.
  virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Determinant look-ahead used by the delayed-update CUDA path.
  virtual void det_lookahead(MCWalkerConfiguration& W,
                             std::vector<ValueType>& psi_ratios,
                             std::vector<GradType>& grad,
                             std::vector<ValueType>& lapl,
                             int iat,
                             int k,
                             int kd,
                             int nw)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Apply the outcome (acc) of a proposed move for particle iat.
  virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Apply accepted moves for per-walker particle indices.
  virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Non-local PP ratios at quadrature points (job-list form).
  virtual void NLratios(MCWalkerConfiguration& W,
                        std::vector<NLjob>& jobList,
                        std::vector<PosType>& quadPoints,
                        std::vector<ValueType>& psi_ratios)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Non-local PP ratios with device-resident argument lists.
  virtual void NLratios(MCWalkerConfiguration& W,
                        gpu::device_vector<CUDA_PRECISION*>& Rlist,
                        gpu::device_vector<int*>& ElecList,
                        gpu::device_vector<int>& NumCoreElecs,
                        gpu::device_vector<CUDA_PRECISION*>& QuadPosList,
                        gpu::device_vector<CUDA_PRECISION*>& RatioList,
                        int numQuadPoints)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /// Parameter derivatives of log(psi) and of H psi/psi for all walkers.
  virtual void evaluateDerivatives(MCWalkerConfiguration& W,
                                   const opt_variables_type& optvars,
                                   RealMatrix_t& dgrad_logpsi,
                                   RealMatrix_t& dhpsi_over_psi)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }
#endif
};
} // namespace qmcplusplus
#endif
|
mem_managerRACBVH.h | #ifndef MEM_NANAGER_RACBVH_H
#define MEM_NANAGER_RACBVH_H
#include "App.h"
#include "VDTActiveList.h"
#include <math.h>
#include "memory_map.h"
// Application specific part
template <class T>
class RACBVH;
template <class T>
extern bool loadCluster(RACBVH<T> *pBVH, unsigned int CN, T* posCluster, long diskClusterOffset, int threadNum);
// read only memory manager based on LRU
// A single cached page holding NumElement items of type T, linked into the
// manager's intrusive LRU list via m_pNext/m_pPrev.
template <class T>
class CMemElementRACBVH{
public:
  int m_PageID;        // Page idx in the original data
  int m_CachedPageID;  // idx of cached Page (slot in the manager)
  T * m_Element;       // page payload, owned here (may be detached by Unload)
  int m_NumElement;    // capacity of m_Element, in elements
  CMemElementRACBVH <T> * m_pNext, * m_pPrev;  // intrusive LRU links

  // Empty, unloaded element (also used for the LRU head/tail sentinels).
  CMemElementRACBVH (void)
  {
    m_CachedPageID = m_PageID = -1;
    m_Element = NULL;
    // FIX: m_NumElement was left uninitialized by this constructor;
    // zero keeps the empty state consistent with m_Element == NULL.
    m_NumElement = 0;
    m_pNext = m_pPrev = NULL;
  }

  // Allocate a page of NumElement items for cache slot CachedPageID.
  CMemElementRACBVH (int PageID, int CachedPageID, int NumElement)
  {
    m_PageID = PageID;
    m_CachedPageID = CachedPageID;
    m_NumElement = NumElement;
    m_Element = new T [NumElement];
    m_pNext = m_pPrev = NULL;
  }

  ~CMemElementRACBVH (void)
  {
    // delete[] NULL would be a no-op, but keep the guard for clarity.
    if (m_Element) {
      delete [] m_Element;
      m_Element = NULL;
    }
  }
};
// A cached compressed cluster (raw bytes), linked into an intrusive LRU list.
class CMemElementRACBVHCompressedCluster{
public:
  int m_PageID;                    // Page idx in the original data
  unsigned char *m_CachedCluster;  // Pointer of cached compressed cluster
  int m_ClusterSize;               // size of m_CachedCluster in bytes
  CMemElementRACBVHCompressedCluster * m_pNext, * m_pPrev;  // LRU links

  CMemElementRACBVHCompressedCluster (void)
  {
    // (unsigned char*)-1 is the historical "not yet cached" sentinel; it is
    // kept for compatibility with external checks, but must never be freed.
    m_CachedCluster = (unsigned char*)-1;
    m_PageID = -1;
    m_pNext = m_pPrev = NULL;
    m_ClusterSize = 0;
  }

  // Allocate a ClusterSize-byte buffer for page PageID.
  CMemElementRACBVHCompressedCluster (int PageID, int ClusterSize)
  {
    m_PageID = PageID;
    m_ClusterSize = ClusterSize;
    m_CachedCluster = new unsigned char[ClusterSize];
    m_pNext = m_pPrev = NULL;
  }

  ~CMemElementRACBVHCompressedCluster (void)
  {
    // FIX: the old destructor delete[]d the (unsigned char*)-1 sentinel set
    // by the default constructor — undefined behavior. Free real buffers only.
    if (m_CachedCluster && m_CachedCluster != (unsigned char*)-1) {
      delete [] m_CachedCluster;
    }
    m_CachedCluster = NULL;
  }
};
// Read-only LRU page cache for RACBVH cluster data.
//
// Pages of m_PageSize elements are loaded on demand (via loadCluster when
// backed by a RACBVH) into at most m_NumCachedPage resident slots; eviction
// follows an intrusive LRU list. Under _USE_OPENMP, loading is guarded by
// one lock per page plus a critical section around eviction.
template <class T>
class CMemManagerRACBVH// : public CMemoryMappedFile <T>
{
public:
  // Returns true iff Src is an exact power of two; on success Power = log2(Src).
  static bool IsPowerOfTwo (unsigned int Src, int & Power)
  {
    const int NumTests = 32;
    static const unsigned int powConst[NumTests] =
      { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648U };
    int i;
    for (i = 0; i < NumTests; i++)
      if (Src == powConst [i]) {
        Power = i;
        return true;
      }
      else if (Src < powConst [i]) {
        // passed the candidate without matching: not a power of two
        return false;
      }
    return false;
  }

  // log2(x) via natural logs (helper; currently unused in this header).
  static float _log_2 (float x)
  {
    float Result = log (x) / log (float (2));
    return Result;
  }

  int UNLOADED;                         // sentinel stored in m_Loaded[] for non-resident pages (-1)
  char m_ObjName [255];                 // Manager Name, for each debug
  int m_ObjID;                          // Manager ID
  int m_MaxNumPage;                     // maximum different Pages in the original data
  int m_NumCachedPage;                  // maximum cached Pages in the manager
  int m_CurNumCachedPage;               // index of the highest used slot (-1 when empty)
  int m_PageSize;                       // Page size in terms of element
  int m_LocalIDMask, m_PageLocalBit;    // bit mask and # of bit corresponding to slot size
  int m_LastAccessedPage[NUM_THREADS];  // per-thread last page touched (cheap LRU-update filter)
  int * m_Loaded;                       // indicate idx of cached Page if loaded
  long * m_DiskClusterOffset;           // disk offsets of compressed clusters
  CMemElementRACBVH <T> ** m_pPages;    // cache slots, owned by this manager
  CActiveList <CMemElementRACBVH <T> * > m_LRUList;  // intrusive LRU over the slots
#ifdef USE_DM
  int m_MaxCachedMemCCluster;
  int m_UsedCacedMemCCluster;
  unsigned char **m_LoadedCCluster;
  CMemElementRACBVHCompressedCluster ** m_pPagesCCluster;
  CActiveList <CMemElementRACBVHCompressedCluster *> m_LRUListCCluster;
#endif
#ifdef _USE_OPENMP
  omp_lock_t *lck;                      // one lock per page, created in Init()
#endif

  CMemManagerRACBVH (void)
  {
    m_Loaded = NULL;
    m_DiskClusterOffset = NULL;
    m_pPages = NULL;
    m_pRACBVH = NULL;
    UNLOADED = -1;
    // FIX: keep the destructor safe even when Init() is never called.
    m_MaxNumPage = 0;
    m_NumCachedPage = 0;
    m_CurNumCachedPage = -1;
#ifdef _USE_OPENMP
    lck = NULL;
#endif
  }

  // PageSize should be power of two for efficiency
  /** Configure the cache for NumElement total elements split into pages of
   *  PageSize elements, keeping at most NumCachedPage pages resident.
   *  Exits the process when PageSize is not a power of two. */
  bool Init (char * pName, int NumElement, int NumCachedPage, int PageSize)
  {
    bool Result = IsPowerOfTwo (PageSize, m_PageLocalBit);
    if (Result == false) {
      printf ("Page size (%d) is not power of two\n", PageSize);
      exit (-1);
    }
    m_NumCachedPage = NumCachedPage;
    m_MaxNumPage = int (ceil (float (NumElement) / float (PageSize)));
    if (m_MaxNumPage < m_NumCachedPage)
      m_NumCachedPage = m_MaxNumPage;
    m_LocalIDMask = PageSize - 1;
    m_CurNumCachedPage = -1;
    m_PageSize = PageSize;
    int i;
    for (i = 0; i < NUM_THREADS; i++)
      m_LastAccessedPage[i] = -1;
    // FIX: bounded copy (was strcpy) — m_ObjName is a fixed 255-byte buffer.
    snprintf (m_ObjName, sizeof (m_ObjName), "%s", pName);
    m_Loaded = new int [m_MaxNumPage];
    m_DiskClusterOffset = new long [m_MaxNumPage];
    for (i = 0; i < m_MaxNumPage; i++)
    {
      m_Loaded [i] = UNLOADED;
      m_DiskClusterOffset [i] = 0;
    }
    m_pPages = new CMemElementRACBVH <T> * [m_NumCachedPage];
    {
      // init LRU list
      CMemElementRACBVH <T> * pStartHead = new CMemElementRACBVH <T>;
      CMemElementRACBVH <T> * pEndHead = new CMemElementRACBVH <T>;
      m_LRUList.InitList (pStartHead, pEndHead);
    }
    fprintf (stderr, "%d (among %d) Pages created (total size = %dK)\n",
             m_NumCachedPage, m_MaxNumPage,
             // FIX: the expression is size_t; cast to match the %d format.
             int (PageSize * m_NumCachedPage * sizeof (T) / 1024));
    m_pRACBVH = NULL;
#ifdef _USE_OPENMP
    lck = new omp_lock_t[m_MaxNumPage];
    for (i = 0; i < m_MaxNumPage; i++)
    {
      omp_init_lock(&lck[i]);
    }
#endif
    return true;
  }

  ~CMemManagerRACBVH (void) {
    if (m_pPages) {
      // FIX: slots 0..m_CurNumCachedPage were new'ed on demand and the old
      // destructor leaked them. (The two LRU head/tail sentinels are still
      // owned by m_LRUList — TODO: release them there.)
      for (int p = 0; p <= m_CurNumCachedPage; p++)
        delete m_pPages [p];
      delete [] m_pPages;
      m_pPages = NULL;
    }
    if (m_Loaded) {
      delete [] m_Loaded;
      m_Loaded = NULL;
    }
    if (m_DiskClusterOffset) {
      delete [] m_DiskClusterOffset;
      m_DiskClusterOffset = NULL;
    }
#ifdef _USE_OPENMP
    if (lck) {  // FIX: Init() may never have run
      for (int i = 0; i < m_MaxNumPage; i++)
      {
        omp_destroy_lock(&lck[i]);
      }
      delete[] lck;
      lck = NULL;
    }
#endif
  }

  /// True when the page containing element i is resident in the cache.
  /// (Return type was 'const bool'; top-level const on a return value is meaningless.)
  bool IsElementLoaded (unsigned int i)
  {
    int PageID = i >> m_PageLocalBit;
    if (m_Loaded [PageID] == UNLOADED)
      return false;
    return true;
  }

  /// Historical hook to refresh a page's LRU position; body is disabled.
  bool SetPageAccessed (unsigned int PageID)
  {
    assert ((int) PageID < m_MaxNumPage);
    assert (m_Loaded [PageID] != UNLOADED);
    int CachedPageID = m_Loaded [PageID];
    CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID];
    /*
    if (PageID != m_LastAccessedPage) {
      // manage LRU list, already loaded. So put it front.
      m_LRUList.ForceAdd (pPage);
      m_LastAccessedPage = PageID;
    }
    */
    return true;
  }

  /** Element access: loads the containing page on demand and refreshes the
   *  LRU position (at most once per thread per page in a row). */
  T & operator [] (unsigned int i)
  {
    int PageID = i >> m_PageLocalBit;
    int LocalID = i & m_LocalIDMask;
    // Double-checked locking: cheap unlocked test, then re-test under the
    // per-page lock before loading.
    if (m_Loaded [PageID] == UNLOADED) {
#ifdef _USE_OPENMP
      omp_set_lock(&lck[PageID]);
      //cout << "[" << omp_get_thread_num() << "] " << "lock setted (" << lck << ")" << endl;
#endif
      if (m_Loaded [PageID] == UNLOADED) {
        // FIX (heap overflow): the slot used below is m_CurNumCachedPage + 1,
        // so only grow while that index stays < m_NumCachedPage. The old test
        // (m_CurNumCachedPage < m_NumCachedPage) wrote one slot past the end
        // of m_pPages once the cache filled.
        if (m_CurNumCachedPage < m_NumCachedPage - 1) {
          m_CurNumCachedPage++;
          int curNumCachedPage = m_CurNumCachedPage;
          m_pPages [curNumCachedPage] = new CMemElementRACBVH <T> (PageID, curNumCachedPage, m_PageSize);
          // require application specific load job
          Load (m_pPages [curNumCachedPage], PageID);
          m_LRUList.ForceAdd (m_pPages [curNumCachedPage]);
          m_Loaded [PageID] = curNumCachedPage;
        }
        else {
          // Cache full: evict the least-recently-used page and reuse its slot.
          CMemElementRACBVH <T> * pLeastUsed;
#ifdef _USE_OPENMP
#pragma omp critical
#endif
          {
            pLeastUsed = m_LRUList.m_pEnd->m_pPrev;
            Unload (pLeastUsed);
            m_Loaded [pLeastUsed->m_PageID] = UNLOADED;
          }
          // NOTE(review): here the slot is re-queued before Load(), while
          // GetReference() loads first — confirm whether the ordering matters
          // for concurrent readers.
          m_LRUList.ForceAdd (pLeastUsed);
          // require application specific load job
          // Map.Load (StartPos, m_AccessibleSize, m_FileSize);
          Load (pLeastUsed, PageID);
          m_Loaded [PageID] = pLeastUsed->m_CachedPageID;
        }
      }
#ifdef _USE_OPENMP
      //cout << "[" << omp_get_thread_num() << "] " << "lock unsetted (" << lck << ")" << endl;
      omp_unset_lock(&lck[PageID]);
#endif
    }
    int CachedPageID = m_Loaded [PageID];
    CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID];
#ifdef _USE_OPENMP
    int thread_num = omp_get_thread_num();
#else
    int thread_num = 0;
#endif
    if (PageID != m_LastAccessedPage[thread_num]) {
      // manage LRU list, already loaded. So put it front.
      m_LRUList.ForceAdd (pPage);
      m_LastAccessedPage[thread_num] = PageID;
    }
    return pPage->m_Element [LocalID];
  }

  /** Same contract as operator[]; kept as a named method for call sites that
   *  want an explicit name. */
  T & GetReference (unsigned int i)
  {
    int PageID = i >> m_PageLocalBit;
    int LocalID = i & m_LocalIDMask;
    if (m_Loaded [PageID] == UNLOADED) {
#ifdef _USE_OPENMP
      omp_set_lock(&lck[PageID]);
#endif
      if (m_Loaded [PageID] == UNLOADED) {
        // FIX: same off-by-one as operator[] (see comment there).
        if (m_CurNumCachedPage < m_NumCachedPage - 1) {
          m_CurNumCachedPage++;
          int curNumCachedPage = m_CurNumCachedPage;
          // FIX: the m_PageSize argument was missing — CMemElementRACBVH has
          // no two-argument constructor, so this could not even compile when
          // instantiated, and the slot buffer must be m_PageSize elements.
          m_pPages [curNumCachedPage] = new CMemElementRACBVH <T> (PageID, curNumCachedPage, m_PageSize);
          // require application specific load job
          Load (m_pPages [curNumCachedPage], PageID);
          m_LRUList.ForceAdd (m_pPages [curNumCachedPage]);
          m_Loaded [PageID] = curNumCachedPage;
        }
        else {
          CMemElementRACBVH <T> * pLeastUsed;
#ifdef _USE_OPENMP
#pragma omp critical
#endif
          {
            pLeastUsed = m_LRUList.m_pEnd->m_pPrev;
            Unload (pLeastUsed);
            m_Loaded [pLeastUsed->m_PageID] = UNLOADED;
          }
          // require application specific load job
          // Map.Load (StartPos, m_AccessibleSize, m_FileSize);
          Load (pLeastUsed, PageID);
          m_LRUList.ForceAdd (pLeastUsed);
          m_Loaded [PageID] = pLeastUsed->m_CachedPageID;
        }
      }
#ifdef _USE_OPENMP
      omp_unset_lock(&lck[PageID]);
#endif
    }
    int CachedPageID = m_Loaded [PageID];
    CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID];
#ifdef _USE_OPENMP
    int thread_num = omp_get_thread_num();
#else
    int thread_num = 0;
#endif
    if (PageID != m_LastAccessedPage[thread_num]) {
      // manage LRU list, already loaded. So put it front.
      m_LRUList.ForceAdd (pPage);
      m_LastAccessedPage[thread_num] = PageID;
    }
    return pPage->m_Element [LocalID];
  }

  /// Read-only access to an element whose page is known to be resident.
  const T & GetConstRefWithoutLRU (unsigned int i)
  {
    int PageID = i >> m_PageLocalBit;
    int LocalID = i & m_LocalIDMask;
    if (m_Loaded [PageID] == UNLOADED) {
      fprintf (stderr, "GetConstRefWithoutLRU should not be called here\n");
      exit (-1);
    }
    int CachedPageID = m_Loaded [PageID];
    CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID];
    return pPage->m_Element [LocalID];
  }

  /// Mutable access to an element whose page is known to be resident.
  T & GetReferenceWithoutLRU (unsigned int i) {
    int PageID = i >> m_PageLocalBit;
    int LocalID = i & m_LocalIDMask;
    if (m_Loaded [PageID] == UNLOADED) {
      fprintf (stderr, "GetReferenceWithoutLRU should not be called here\n");
      exit (-1);
    }
    int CachedPageID = m_Loaded [PageID];
    CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID];
    return pPage->m_Element [LocalID];
  }

  // application specific data and functions
  // TODO, we can do this by inheriting and virtualization
  RACBVH<T> * m_pRACBVH; // class holding data (NULL => legacy memory-mapped mode)

  /// Evict a slot's payload before it is reused for another page.
  bool Unload (CMemElementRACBVH <T> * pElement) {
    /*
    if (m_ObjID == 0)
      printf ("Obj ID = %d, Unload %d Page\n", m_ObjID, pElement->m_PageID);
    */
    // In RACBVH mode the buffer is kept and refilled by the next Load();
    // otherwise the (formerly memory-mapped) page is simply detached.
    if (m_pRACBVH == NULL) {
      //UnloadMapPage ((char *) pElement->m_Element);
      pElement->m_Element = NULL;
    }
    return true;
  }

  /// Fill a slot with page PageID (decompressing the cluster in RACBVH mode).
  bool Load (CMemElementRACBVH <T> * pElement, int PageID)
  {
    pElement->m_PageID = PageID;
    if (m_pRACBVH)
    {
      int threadNum = 0;
#ifdef _USE_OPENMP
      threadNum = omp_get_thread_num();
#endif
      loadCluster(m_pRACBVH, PageID, pElement->m_Element, m_DiskClusterOffset[PageID], threadNum);
    }
    else
    {
      /*
      if (m_UseFileMap) {
        __int64 StartPos;
        StartPos = (__int64) PageID * m_PageSize * sizeof (T);
        //printf ("load %d unit\n", WhichMap);
        pElement->m_Element = (T *) LoadPage (StartPos, m_PageSize * sizeof (T), m_FileSize, m_MappingMode);
      }
      */
    }
    return true;
  }

  /// Read-only manager: nothing to write back.
  bool Flush (void)
  {
    return true;
  }
};
#endif
|
GB_binop__land_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int16)
// A*D function (colscale): GB (_AxD__land_int16)
// D*A function (rowscale): GB (_DxB__land_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16)
// C=scalar+B GB (_bind1st__land_int16)
// C=scalar+B' GB (_bind1st_tran__land_int16)
// C=A+scalar GB (_bind2nd__land_int16)
// C=A'+scalar GB (_bind2nd_tran__land_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
// Type and operator macros consumed by the #include'd kernel templates below.
// (Generated file: these definitions come from the GraphBLAS code generator —
// change the generator, not this file.)
#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (note: the expansion supplies its own trailing semicolon)
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0
// Disabled by the generator: LAND has no dense-accum ewise3 variant.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;  // unreachable duplicate return emitted by the generator
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values; the template fills Cx in place
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values; the template fills Cx in place
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B by entry/vector; freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (LAND is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always 1 when Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always 1 when Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__land_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the translation unit (preprocessing
    // happens regardless of the returns above)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
biseccao.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#include "omp.h"
// gcc -fopenmp -g biseccao.c -o biseccao -lm
// ./biseccao
void mostrar_argumento();
void calcular();
float funcao(float intervalo);
void resposta_formatada();
void mostrar_tempo_processamento(clock_t begin, clock_t end);
void calcular_com_32threads();
void calcular_com_16threads();
void calcular_com_8threads();
void calcular_com_4threads();
void calcular_com_2threads();
void calcular_com_1threads();
void calcular_com_nthreads();
float intervalo_inicial = 0;
float intervalo_final = 2;
float toleracia = 0.00000000000001;
int numero_threads = 1;
float raiz = 0;
float erro = 0;
int iteracoes = 0;
// Entry point: runs the bisection benchmark with one thread; the other
// thread counts are kept commented out for manual benchmarking.
int main ()
{
// calcular_com_32threads();
// calcular_com_16threads();
// calcular_com_8threads();
// calcular_com_4threads();
// calcular_com_2threads();
calcular_com_1threads();
return 0;
}
// Convenience wrappers: each one runs the whole computation with a fixed
// thread count by delegating to calcular_com_nthreads().
void calcular_com_32threads()
{
calcular_com_nthreads(32);
}
void calcular_com_16threads()
{
calcular_com_nthreads(16);
}
void calcular_com_8threads()
{
calcular_com_nthreads(8);
}
void calcular_com_4threads()
{
calcular_com_nthreads(4);
}
void calcular_com_2threads()
{
calcular_com_nthreads(2);
}
void calcular_com_1threads()
{
calcular_com_nthreads(1);
}
// Runs one full benchmark pass with the given thread count: prints the
// arguments, computes the root, prints the result and the elapsed time.
// NOTE(review): clock() measures CPU time, which for a truly multithreaded
// region would sum across threads rather than give wall-clock time — confirm
// this is the intended metric before comparing thread counts.
void calcular_com_nthreads(int threads)
{
numero_threads = threads;
clock_t begin = clock();
mostrar_argumento();
calcular();
resposta_formatada();
clock_t end = clock();
mostrar_tempo_processamento(begin, end);
}
// Prints the thread count and the CPU time spent between the two clock()
// samples, converted to seconds.
void mostrar_tempo_processamento (clock_t begin, clock_t end)
{
    const double segundos = ((double)(end - begin)) / CLOCKS_PER_SEC;
    printf("Número Threads: %d Tempo de processamento: %f\n", numero_threads, segundos);
}
// Prints the run configuration (thread count, interval bounds, tolerance)
// read from the file-scope globals.
void mostrar_argumento()
{
puts("******************************************************");
puts("ARGUMENTOS:\n");
printf("NUMERO_THREADS: %i\t", numero_threads);
printf("INTERVALO_INICIAL: %.4f\t", intervalo_inicial);
printf("INTERVALO_FINAL: %.4f\t", intervalo_final);
printf("TOLERACIA: %.1e\t", toleracia);
puts("");
}
// Bisection root finder for funcao() on [intervalo_inicial, intervalo_final].
// Publishes the result through the globals raiz, erro and iteracoes.
//
// BUG FIX: the original put `#pragma omp parallel num_threads(numero_threads)`
// directly on the while loop. That made every thread execute the entire
// sequential bisection concurrently while all of them mutated the shared
// globals (intervalo_inicial, intervalo_final, erro, iteracoes) with no
// synchronization — a data race (undefined behavior) with no speedup, since
// each bisection step depends on the previous one. The pragma is removed;
// the sequential numerical behavior is unchanged.
void calcular ()
{
    float pontoMedio = 0;
    float resultadoFuncaoParaintervalo_inicial = 0;
    float resultadoFuncaoParaIntervaloMedio = 0;
    erro = fabs(intervalo_inicial - intervalo_final);
    while (erro > toleracia)
    {
        iteracoes++;
        pontoMedio = (intervalo_inicial + intervalo_final) / 2;
        resultadoFuncaoParaintervalo_inicial = funcao(intervalo_inicial);
        resultadoFuncaoParaIntervaloMedio = funcao(pontoMedio);
        if (resultadoFuncaoParaintervalo_inicial * resultadoFuncaoParaIntervaloMedio < 0)
            intervalo_final = pontoMedio; // sign change in lower half: shrink from the right
        else
            intervalo_inicial = pontoMedio; // otherwise the root is in the upper half
        erro = fabs(intervalo_inicial - intervalo_final);
        if (iteracoes == 1000) break; // safety cap against non-convergence
    }
    raiz = pontoMedio;
}
float funcao(float intervalo)
{
return (
pow(intervalo, 5) -
pow(intervalo, 4) -
pow(intervalo, 3) -
pow(intervalo, 2)
);
}
// Prints the computed root, the final error bound and the iteration count
// from the file-scope globals set by calcular().
void resposta_formatada()
{
puts("");
printf(
" Raiz aproximada: %.4f\n Erro: %.4f\n Numero de Iterações: %i\n",
raiz,
erro,
iteracoes
);
puts("");
} |
CPUMatrixImpl.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" trigger this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // unreachable code; triggered for unknown reasons
#pragma warning(disable : 4702) // conversion from 'double' to 'float'
#ifdef USE_MKL
// requires MKL 10.0 and above
#include <mkl.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
#define SWAP(a, b) \
{ \
(a) ^= (b); \
(b) ^= (a); \
(a) ^= (b); \
}
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
// Storage-order flag; the 101/102 values mirror the CBLAS order constants.
enum class MatrixOrder
{
RowMajor = 101, // row-major arrays
ColMajor = 102 // column-major arrays
};
// Transpose request, encoded as the BLAS character codes.
enum class MatrixTranspose : char
{
NoTrans = 'N', // trans='N'
Trans = 'T', // trans='T'
ConjTrans = 'C' // trans='C'
};
// How a symmetric matrix is stored (BLAS-style character codes).
enum class SymMatrixType : char
{
Up = 'U', // symmetric matrix is stored in the upper part
Low = 'L', // symmetric matrix is stored in the lower part
Full = 'F', // fully populated
NotSymmetric = 'N' // not a symmetric matrix
};
// Which side of a product the operand sits on.
enum class MatrixOpSide : char
{
Left = 'L', // left multiply
Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
ZeroInit();
}
// helper to allocate an array of ElemType
// Use this instead of new[] to get NaN initialization for debugging.
// Allocates a zero-initialized array of n ElemType values; the disabled
// branch can fill it with NaN for debugging instead.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
ElemType* p = new ElemType[n](); // () value-initializes (zeros) the array
#if 0 // _DEBUG
ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
for (size_t i = 0; i < n; i++)
p[i] = nan;
#endif
return p;
}
// Constructs a numRows x numCols matrix, allocating a zeroed buffer when
// the matrix is non-empty.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
ZeroInit();
m_numRows = numRows;
m_numCols = numCols;
SetSizeAllocated(GetNumElements());
if (GetNumElements() != 0)
{
SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType));
}
}
// Constructs a matrix from an existing array; matrixFlags controls whether
// the buffer is copied or adopted (see SetValue).
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
ZeroInit();
SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
// Copy constructor: deep-copies the source matrix's contents.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
ZeroInit();
SetValue(deepCopyFrom);
}
//assignment operator, deep copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
SetValue(deepCopyFrom);
return *this;
}
//move constructor, shallow copy
// Move constructor: steals the source's buffer (shallow copy) and clears
// the source so its destructor does not free the buffer.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
: Base(/* shallow */ true)
{
ShallowCopyFrom(moveFrom);
moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
// Shallow-copy constructor (view over the same buffer); the bool parameter
// only selects this overload.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
: Base(shallow)
{
ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
// Move assignment: shallow-copies from the source, then clears the source
// so the buffer is not freed twice.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
if (this != &moveFrom)
{
ShallowCopyFrom(moveFrom);
// release the pointer from the source object so that the destructor won't release it twice
moveFrom.ZeroValues();
}
return *this;
}
// Resets this matrix to the empty state.
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
// Returns a shallow view of numCols columns starting at startColumn
// (no data is copied; the view shares this matrix's buffer).
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
if (startColumn + numCols > m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);
CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
slice.m_numCols = numCols;
slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
return slice;
}
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
// Turns this matrix into a shallow view of fromMatrix's columns
// [startColumn, startColumn+numCols).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > fromMatrix.m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols)
;
Clear();
ShallowCopyFrom(fromMatrix);
m_numCols = numCols;
m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
// Copies all of fromMatrix into this matrix's columns starting at
// startColumn (deep copy via memcpy; column-major layout).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > m_numCols)
LogicError("The slice is out of range of the destination matrix.");
if (numCols > fromMatrix.GetNumCols())
InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
return *this;
}
// Copies numCols columns from fromMatrix into this matrix, reading every
// srcNumColsStride-th source column and writing every destNumColsStride-th
// destination column. Row counts must match.
// FIX: the destination bounds check's message wrongly referred to
// srcNumColsStride; the inner loops now use long indices to match the
// signedness of m (they were size_t, mixing signed/unsigned in `i < (m & ~3)`).
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
    if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
        LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
    if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
        LogicError("The numCols to copy and destNumColsStride specified is out of range of the destination matrix.");
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices do not match");
    long n = (long) numCols, m = (long) m_numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
            us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
            us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
            us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
        }
        // handle remaining rows
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
        }
    }
}
//for each column of a, we assign all rows of a to this starting from startIndex
// Assigns all numRows rows of `a` into this matrix starting at row
// startIndex, column by column.
// FIX: all three diagnostics named AddToRowSliceValuesOf (copy-paste from
// the sibling function), which misdirected anyone debugging a failure here;
// inner loop indices are now long to match the signedness of m.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    if (a.GetNumRows() != numRows)
        LogicError("AssignToRowSliceValuesOf: a.GetNumRows() != numRows.");
    if (startIndex + numRows > GetNumRows())
        LogicError("AssignToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
    if (a.GetNumCols() != GetNumCols())
        LogicError("AssignToRowSliceValuesOf: columns does not match.");
    long n = (long) a.GetNumCols(), m = (long) numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
        {
            us(startRow, j) = a(i, j);
            us(startRow + 1, j) = a(i + 1, j);
            us(startRow + 2, j) = a(i + 2, j);
            us(startRow + 3, j) = a(i + 3, j);
        }
        // handle remaining rows
        for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
        {
            us(startRow, j) = a(i, j);
        }
    }
    return *this;
}
//for each column of a, we assign numRows starting from startIndex to this
// Resizes this matrix to numRows x a.GetNumCols() and copies rows
// [startIndex, startIndex+numRows) of each column of `a` into it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (startIndex + numRows > a.GetNumRows())
LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
RequireSize(numRows, a.GetNumCols());
long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
long k = (long) a.GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// contiguous column-major rows: one memcpy per column is faster than the
// element loop kept below for reference
memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
// //four-way unrolling
// for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
// {
// us(i,j) = a(startRow,j);
// us(i+1,j) = a(startRow+1,j);
// us(i+2,j) = a(startRow+2,j);
// us(i+3,j) = a(startRow+3,j);
// }
// //handle remaining stuffs
// for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
// {
// us(i,j) = a(startRow,j);
// }
}
return *this;
}
//for the row slice of this starting from startIndex we add a to it.
// Adds `a` into the row slice [startIndex, startIndex+numRows) of this
// matrix, column by column.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) += a(i, j);
us(startRow + 1, j) += a(i + 1, j);
us(startRow + 2, j) += a(i + 2, j);
us(startRow + 3, j) += a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) += a(i, j);
}
}
return *this;
}
//for each column of this, we add row slice of a starting from startIndex
// Adds the row slice [startIndex, startIndex+numRows) of `a` into this
// matrix (the mirror image of AddToRowSliceValuesOf).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
if (GetNumRows() != numRows)
LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
if (startIndex + numRows > a.GetNumRows())
LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddWithRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(i, j) += a(startRow, j);
us(i + 1, j) += a(startRow + 1, j);
us(i + 2, j) += a(startRow + 2, j);
us(i + 3, j) += a(startRow + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(i, j) += a(startRow, j);
}
}
return *this;
}
// Returns the main diagonal of a square matrix as a 1 x N row vector.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
if (m_numRows != m_numCols)
LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);
CPUMatrix<ElemType> diag(1, m_numCols);
auto& us = *this;
#pragma omp parallel for
for (long i = 0; i < m_numRows; i++) // NOTE(review): long vs size_t comparison — benign for realistic sizes
{
diag(0, (size_t) i) = us(i, i);
}
return diag;
}
// Subtracts 1 from the element at linear offset `position` of c;
// errors out if the offset is out of range.
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
if (position < c.GetNumElements())
c.Data()[position] -= 1.0;
else
RuntimeError("MinusOneAt: position is out of CPU matrix size");
}
// Tiles `a` numRowRepeats times vertically and numColRepeats times
// horizontally into this matrix (resized accordingly).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
if (this == &a)
LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");
if (a.IsEmpty())
LogicError("AssignRepeatOf: Matrix a is empty.");
RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long q = 0; q < numColRepeats; q++) // one tile-column per parallel task
{
for (long p = 0; p < numRowRepeats; p++)
{
long colOffset = q * n;
for (long j = 0; j < n; j++, colOffset++)
{
long rowOffset = p * m;
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
{
us(rowOffset, colOffset) = a(i, j);
us(rowOffset + 1, colOffset) = a(i + 1, j);
us(rowOffset + 2, colOffset) = a(i + 2, j);
us(rowOffset + 3, colOffset) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++, rowOffset++)
{
us(rowOffset, colOffset) = a(i, j);
}
}
}
}
return *this;
}
// Folds `a` (whose rows are numRepeats vertical copies of this matrix's
// shape) by adding each copy's rows into this matrix.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
if (a.IsEmpty())
LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");
if (a.GetNumRows() != GetNumRows() * numRepeats)
LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");
long n = (long) a.GetNumCols(), m = (long) GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
us(i + 1, j) += a(k * m + i + 1, j);
us(i + 2, j) += a(k * m + i + 2, j);
us(i + 3, j) += a(k * m + i + 3, j);
}
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
}
}
}
return *this;
}
// Not implemented on CPU; the bare statements silence unused-parameter warnings.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
// Not implemented on CPU; the bare statements silence unused-parameter warnings.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
a;
posNumber;
negNumber;
shiftNumber;
NOT_IMPLEMENTED;
}
// Returns a new matrix holding the transpose of this one.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
if (IsEmpty())
LogicError("Transpose: Matrix is empty.");
CPUMatrix<ElemType> c;
c.AssignTransposeOf(*this);
return c;
}
// Sets this matrix to the transpose of `a` (out-of-place; in-place is
// rejected).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
if (this == &a)
LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");
if (a.IsEmpty())
LogicError("AssignTransposeOf: Matrix a is empty.");
RequireSize(a.GetNumCols(), a.GetNumRows());
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(j, i) = a(i, j);
us(j, i + 1) = a(i + 1, j);
us(j, i + 2) = a(i + 2, j);
us(j, i + 3) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(j, i) = a(i, j);
}
}
return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
// dst[i] = alpha * src[i] + beta * dst[i] for one column of numRows values,
// with fast paths for the common alpha/beta combinations.
// When beta == 0, dst is never read (it may be uninitialized or NaN).
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
if (alpha != 1) // rare case: just do the full thing
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + alpha * src[i];
else if (beta == 1) // used in backprop
for (size_t i = 0; i < numRows; i++)
dst[i] += src[i];
else if (beta == 0) // plain assignment
memcpy(dst, src, sizeof(ElemType) * numRows);
else // alpha=1, arbitrary beta: also rare case
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + src[i];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta; a NaN or negative
// index denotes a gap and leaves that output column untouched.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
if (beta)
VerifySize(a.GetNumRows(), idx.GetNumCols());
else
Resize(a.GetNumRows(), idx.GetNumCols());
auto& us = *this;
// race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows.
foreach_column(jOut, us)
{
auto jInF = idx(0, jOut); // this is the column we need to get
if (std::isnan(jInF) || jInF < 0) // negative index means gap
continue;
size_t jIn = (size_t)jInF;
if (jIn >= a.GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
}
return *this;
}
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta; a NaN or
// negative index denotes a gap. The whole target is pre-scaled by beta
// because several source columns may scatter into the same target column.
// FIX: the out-of-bounds diagnostic said "DoGatherColumnsOf" (copy-paste
// from the gather sibling), pointing debuggers at the wrong function.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
    if (idx.GetNumRows() != 1) // index is 1-dimensional only
        InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
    if (idx.GetNumCols() != a.GetNumCols())
        InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
    if (a.GetNumRows() != GetNumRows())
        InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
    auto& us = *this;
    // pre-scale with beta upfront
    // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
    Scale(beta, us); // if beta is 0, then this will be a memset()
    // race-condition consideration: If idx[] references the same target column multiple times, this can have a race condition,
    // and hence cannot use parallelism.
    //#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows.
    foreach_column(jIn, a)
    {
        auto jOutF = idx(0, jIn); // this is the column we copy/add into
        if (std::isnan(jOutF) || jOutF < 0) // negative index means gap
            continue;
        size_t jOut = (size_t)jOutF;
        if (jOut >= GetNumCols())
            InvalidArgument("DoScatterColumnsOf: Map out of bounds.");
        ScaleAndAddColumn(/*beta=*/(ElemType)1, &us(0, jOut), &a(0, jIn), us.GetNumRows(), alpha);
    }
    return *this;
}
// Fills the whole matrix with the scalar v; uses memset for finite zero,
// otherwise a lightly-parallelized unrolled store loop.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
if (IsEmpty())
LogicError("SetValue: Matrix is empty.");
bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
if (isFinite && v == 0)
{
memset(Data(), 0, sizeof(ElemType) * GetNumElements());
}
else
{
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// 2-way thread parallelism is sufficient for the memory bound
// operation of just setting the values of an array.
const unsigned SETVALUE_NUM_THREADS = 2;
UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = v;
bufPtr[i + 1] = v;
bufPtr[i + 2] = v;
bufPtr[i + 3] = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = v;
}
}
}
// Overwrites with `val` every group of numColsPerMaskEntry columns whose
// mask entry is not 1 (mask value 1 means "keep").
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
auto& us = *this;
long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
if (columnsMask(0, j) == 1)
continue;
for (long k = 0; k < numColsPerMaskEntry; ++k)
{
// four-way unrolling
for (size_t i = 0; i < (m & ~3); i += 4)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
us(i + 1, (j * numColsPerMaskEntry) + k) = val;
us(i + 2, (j * numColsPerMaskEntry) + k) = val;
us(i + 3, (j * numColsPerMaskEntry) + k) = val;
}
// handle remaining
for (size_t i = m & ~3; i < m; i++)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
}
}
}
}
// Copies a raw column array into column j; silently a no-op for NULL input.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (colPointer == NULL)
return;
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = colPointer[i];
us(i + 1, j) = colPointer[i + 1];
us(i + 2, j) = colPointer[i + 2];
us(i + 3, j) = colPointer[i + 3];
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = colPointer[i];
}
}
// Fills column j with the scalar val.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = val;
us(i + 1, j) = val;
us(i + 2, j) = val;
us(i + 3, j) = val;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = val;
}
}
// Copies a single-column matrix valMat into column j of this matrix.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
LogicError("The valMat matrix has incorrect number of rows or columns.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = valMat(i, 0);
us(i + 1, j) = valMat(i + 1, 0);
us(i + 2, j) = valMat(i + 2, 0);
us(i + 3, j) = valMat(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = valMat(i, 0);
}
}
// Deep-copies another CPU matrix into this one; self-copy is a no-op.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
if (this == &deepCopyFrom)
return;
SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
#endif
// Loads matrix contents from a raw array. With matrixFlagDontOwnBuffer the
// array is adopted in place; otherwise it is copied, transposing on the fly
// (via BLAS strided copies) when the source is row-major.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
if (pArray == nullptr && numRows * numCols > 0)
InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
SetFormat(matrixFormatDense);
SetComputeDeviceId(CPUDEVICE);
// if it's externally managed, then populate the structure
if (matrixFlags & matrixFlagDontOwnBuffer)
{
// free previous array allocation if any before overwriting
delete[] Buffer();
m_numRows = numRows;
m_numCols = numCols;
SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
SetSizeAllocated(GetNumElements());
}
else
{
RequireSize(numRows, numCols);
if (!IsEmpty())
{
if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
else // need to transpose
{
ElemType* bufPtr = Data();
auto& us = *this;
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, us)
{
// strided copy gathers row-major row j into column-major column j
cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, us)
{
{
#pragma warning(suppress : 4244)
cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
}
}
}
}
}
}
}
// Sets every main-diagonal element of a square matrix to v.
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = v;
us(i + 1, i + 1) = v;
us(i + 2, i + 2) = v;
us(i + 3, i + 3) = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = v;
}
}
// Sets the main diagonal of a square matrix from a vector (row or column
// orientation both accepted; a 1-element vector degenerates to the scalar case).
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
if (IsEmpty() || vector.IsEmpty())
LogicError("SetDiagonalValue: Matrix is empty.");
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
LogicError("SetDiagonalValue: input vector must be a vector.");
if (vector.GetNumElements() == 1) // reduce to simple form
SetDiagonalValue(vector(0, 0));
else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
else
{
auto& us = *this;
long m = (long) GetNumRows();
if (vector.GetNumRows() == 1) // row vector
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(0, i);
us(i + 1, i + 1) = vector(0, i + 1);
us(i + 2, i + 2) = vector(0, i + 2);
us(i + 3, i + 3) = vector(0, i + 3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(0, i);
}
}
else
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(i, 0);
us(i + 1, i + 1) = vector(i + 1, 0);
us(i + 2, i + 2) = vector(i + 2, 0);
us(i + 3, i + 3) = vector(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(i, 0);
}
}
}
}
// Fills the matrix with uniform random values in [low, high); sequential
// because a single generator instance is used.
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::uniform_real_distribution<ElemType> r(low, high);
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = r(generator);
bufPtr[i + 1] = r(generator);
bufPtr[i + 2] = r(generator);
bufPtr[i + 3] = r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = r(generator);
}
}
// Fills the matrix with N(mean, sigma) Gaussian samples.
// FIX: both diagnostics said "SetUniformRandomValue" — a copy-paste from
// the uniform variant — which misreported the failing function.
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("SetGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    // sequential: sharing one generator across OpenMP threads would race
    foreach_coord (i, j, us)
    {
        us(i, j) = r(generator);
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    // Fill the matrix with i.i.d. Gaussian samples drawn from N(mean, sigma^2).
    // NOTE(review): despite the name "Add...", the loop below ASSIGNS (us(i,j) = ...)
    // rather than accumulates — preserved as-is since callers may rely on it; confirm intent.
    // BUGFIX: the error messages previously reported "SetUniformRandomValue" (copy/paste).
    if (sigma <= 0)
        InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("AddGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    // column-major walk; generator consumed sequentially, so no parallelization
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = r(generator);
            us(i + 1, j) = r(generator);
            us(i + 2, j) = r(generator);
            us(i + 3, j) = r(generator);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = r(generator);
        }
    }
}
//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: which scale value to set to the left ones (unmasked items).
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
    // BUGFIX: the empty-matrix error previously reported "SetUniformRandomValue" (copy/paste).
    if (IsEmpty())
        LogicError("SetUniformRandomMask: Matrix is empty.");
    // the RNG handle must be CPU-backed to expose a usable generator
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    auto& us = *this;
    boost::random::uniform_real_distribution<ElemType> r(0, 1);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    ElemType v;
    // sequential generator consumption -> no OpenMP here
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            v = r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 1, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 2, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 3, j) = v <= maskRate ? 0 : scaleValue;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            v = r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? 0 : scaleValue;
        }
    }
}
// Adagrad update, computed in place:
//   [this] (accumulator) += g^2 per element; then g /= sqrt(accumulator + floor).
// gradients         in/out: raw gradient on entry, Adagrad-scaled gradient on exit.
// needAveMultiplier when true, returns the mean applied multiplier 1/sqrt(acc+floor)
//                   over all elements; otherwise returns 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
    ElemType aveMultiplier = 0;
    // lazily (re)initialize the accumulator to match the gradient's shape
    if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
        LogicError("The matrix gradients must have the same rows and columns as this matrix.");
    ElemType *a = Data(), *d_v = gradients.Data();
    size_t n = GetNumElements();
    const ElemType floor = 1e-16f; // guards the sqrt/division against zero accumulator
    ElemType a0, a1, a2, a3;
    // disable omp here because aveMultiper needs to be added atomically. however, it seems the result is incorrect even if rmp atomic and amp critical are used.
    // #pragma omp parallel for
    for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
    {
        a[i] += d_v[i] * d_v[i];
        a[i + 1] += d_v[i + 1] * d_v[i + 1];
        a[i + 2] += d_v[i + 2] * d_v[i + 2];
        a[i + 3] += d_v[i + 3] * d_v[i + 3];
        a0 = sqrt(a[i] + floor);
        a1 = sqrt(a[i + 1] + floor);
        a2 = sqrt(a[i + 2] + floor);
        a3 = sqrt(a[i + 3] + floor);
        d_v[i] /= a0;
        d_v[i + 1] /= a1;
        d_v[i + 2] /= a2;
        d_v[i + 3] /= a3;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
        }
    }
    // get the last few elements if any
    for (long i = n & ~3; i < n; i++)
    {
        a[i] += d_v[i] * d_v[i];
        a0 = sqrt(a[i] + floor);
        d_v[i] /= a0;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0;
        }
    }
    if (needAveMultiplier && n > 0)
        return aveMultiplier / n;
    else
        return 1;
}
// FSAdagrad update. [this] holds two state blocks laid out side by side:
// columns [0, cols) = smoothed squared gradients, columns [cols, 2*cols) = momentum.
// gradients            input gradient (read-only here)
// functionValues       parameters to update, modified in place: val -= lr * step
// learnRatePerSample   learning rate applied per sample
// momentum             momentum coefficient; unitGainMomentum scales new gradient by (1-momentum)
// adaWeight / adaMul   exponential-average weight and multiplier for the Adagrad-style scaling
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
                                    CPUMatrix<ElemType>& functionValues,
                                    ElemType learnRatePerSample,
                                    ElemType momentum,
                                    ElemType adaWeight,
                                    ElemType adaMul,
                                    bool unitGainMomentum)
{
    auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
    // the state matrix needs room for both the Adagrad and the momentum block
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh state starts at zero
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();     // first n elements: smoothed g^2
    ElemType* smoothMom = Data() + n; // next n elements: momentum state
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        if (adaSqr != 0.0f)
        {
            ElemType ada = sqrt(adaSqr);
            ElemType w = adaMul * ((ElemType) 1.0 / ada);
            // clamp the multiplier to avoid blow-up when the accumulator is tiny
            if (w > 10.0f)
                w = 10.0f;
            g *= w;
        }
        if (momentum > 0.0f)
        {
            g = momentum * smoothMom[i] + unitGainFactor * g;
            smoothMom[i] = g;
        }
        g *= learnRatePerSample;
        val[i] -= g;
    }
}
// Adam-style update. [this] stores state as two side-by-side blocks:
// first gradients.GetNumCols() columns = smoothed squared gradients (2nd moment),
// next block = smoothed momentum (1st moment).
// functionValues is updated in place: val -= g * w * learnRatePerSample.
// Note: unlike the canonical Adam paper there is no bias correction visible here;
// adaMul is supplied by the caller.
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
                               ElemType momentum, ElemType adaWeight, ElemType adaMul, bool unitGainMomentum)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh optimizer state
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();     // 2nd-moment estimate
    ElemType* smoothMom = Data() + n; // 1st-moment (momentum) estimate
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        ElemType ada = sqrt(adaSqr);
        ElemType w = adaMul * (ElemType)( 1.0 / (ada + 1e-8)); // epsilon guards division by zero
        g = momentum * smoothMom[i] + unitGainFactor * g;
        smoothMom[i] = g;
        val[i] -= g * w * learnRatePerSample;
    }
}
// RmsProp with Rprop-style per-element step adaptation, applied to 'gradients' in place.
// [this] holds three state blocks of n elements each:
//   [0, n)   avars : moving average of squared gradients
//   [n, 2n)  signs : sign of the previous gradient
//   [2n, 3n) steps : current per-element step size
// Step sizes grow by RMS_WGT_INC (capped at RMS_WGT_MAX) when the gradient keeps its
// sign, and shrink by RMS_WGT_DEC (floored at RMS_WGT_MIN) when it flips.
// Returns the mean applied multiplier when needAveMultiplier, else 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
                                      ElemType RMS_GAMMA,
                                      ElemType RMS_WGT_INC,
                                      ElemType RMS_WGT_MAX,
                                      ElemType RMS_WGT_DEC,
                                      ElemType RMS_WGT_MIN,
                                      const bool needAveMultiplier)
{
    const ElemType floor = 1e-6f; // guards sqrt/division against a zero average
    size_t n = gradients.GetNumElements();
    ElemType* curr_grad = gradients.Data();
    // first call (or shape change): allocate and seed the three state blocks
    if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3)
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
        SetValue(0.0);
        ElemType* avars = Data();         // accumulated variances for RMS scaling
        ElemType* steps = Data() + 2 * n; // current step size
        // initialize moving average of gradient-squared
        for (long i = 0; i < n; i++)
            avars[i] = curr_grad[i] * curr_grad[i];
        // initialize starting step size
        for (long i = 0; i < n; i++)
            steps[i] = ElemType(0.02);
    }
    ElemType* avars = Data();         // accumulated variances for RMS scaling
    ElemType* signs = Data() + n;     // sign of previous gradient
    ElemType* steps = Data() + 2 * n; // current step size
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
        LogicError("The matrix gradients does not have expected dimensions.");
    ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
    // int upd[] = {
    //     2,2,0,
    //     2,2,0,
    //     1,1,1,
    //     2,2,0,
    //     1,2,1,
    //     0,2,2,
    //     1,1,1,
    //     0,2,2,
    //     0,2,2,
    // };
    // for (long i=0; i<n; i++)
    // {
    //     avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
    //     // grad sign base 3: 0->neg, 1->zero, 2->pos
    //     const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
    //     // signs[i] contains three consecutive grad_sign
    //     signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
    //     switch(upd[int(signs[i])])
    //     {
    //     case 0:
    //         steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
    //         break;
    //     case 2:
    //         steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
    //         break;
    //     }
    //     curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
    // }
    ElemType aveMultiplier = 0, a;
    for (long i = 0; i < n; i++)
    {
        // update the running average of g^2
        avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
        // -1 / 0 / +1 sign of the current gradient
        const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
        // same sign as last step -> grow step size; flipped (or zero) -> shrink it
        if (signs[i] * grad_sign > 0)
            steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
        else
            steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
        a = steps[i] / sqrt(avars[i] + floor);
        curr_grad[i] *= a;
        signs[i] = (ElemType) grad_sign;
        if (needAveMultiplier)
            aveMultiplier += a;
    }
    if (needAveMultiplier)
        return aveMultiplier / n;
    else
        return 1;
}
// AdaDelta update (Zeiler-style): maintains two running averages in [this]
// (first block: E[g^2], second block: E[dx^2]) and applies
//   dx = -sqrt(E[dx^2]+eps)/sqrt(E[g^2]+eps) * g;  val += learningRate * dx.
// gradients is read-only here; functionValues is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh state
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();    // E[g^2]
    ElemType* smoothX2 = Data() + n; // E[dx^2]
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
        smoothAda[i] = adaSqr;
        ElemType x2 = smoothX2[i];
        ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
        smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
        val[i] += learningRate * deltaX;
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
    // Reinterpret the existing column-major buffer with new dimensions;
    // the total element count must stay the same (no reallocation, no copy).
    if (GetNumElements() != numRows * numCols)
        InvalidArgument("Reshape: Total number of elements does not match.");
    m_numRows = numRows;
    m_numCols = numCols;
}
// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    // No-op when the dimensions already match; Resize handles everything else.
    const bool alreadyRightSize = (GetNumRows() == numRows) && (GetNumCols() == numCols);
    if (!alreadyRightSize)
        Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    // fast path: dimensions unchanged, nothing to do
    if (GetNumRows() == numRows && GetNumCols() == numCols)
        return;
    VerifyResizable(__func__);
    size_t numElements = numRows * numCols;
    if (numElements > GetSizeAllocated() ||                 // grow allocation
        (!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
    {
        // reallocate buffer
        ElemType* pArray = nullptr;
        if (numElements > 0)
        {
            // allocate first so an allocation failure leaves the old buffer intact
            pArray = NewArray<ElemType>(numElements);
        }
        // success: update the object
        delete[] Buffer();
        SetBuffer(pArray, numElements * sizeof(ElemType));
        SetSizeAllocated(numElements);
    }
    // success
    m_sliceViewOffset = 0;
    m_numRows = numRows;
    m_numCols = numCols;
}
// allocated by the callee but should be deleted by the caller
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
    // Return a freshly allocated flat copy of the data (caller owns it,
    // release with delete[]); nullptr when the matrix is empty.
    const size_t count = GetNumElements();
    if (count == 0)
        return nullptr;
    ElemType* copy = NewArray<ElemType>(count);
    memcpy(copy, Data(), count * sizeof(ElemType));
    return copy;
}
//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done
//return number of elements copied
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
    size_t numElements = GetNumElements();
    if (numElements > currentArraySize)
    {
        // BUGFIX: the buffer comes from NewArray (operator new[]); releasing it with
        // scalar 'delete' is undefined behavior — must use array 'delete[]' to match.
        delete[] arrayCopyTo;
        arrayCopyTo = NewArray<ElemType>(numElements);
        currentArraySize = numElements;
    }
    if (numElements != 0)
    {
        memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
    }
    return numElements;
}
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
    // Not needed by the CPU path so far; fail loudly should anything start calling it.
    // REVIEW alexeyk: currently not used by CPU, but implement when possible.
    RuntimeError("Not implemented.");
}
// Returns the flat buffer offset of the first element of column 'col'
// (column-major layout: columns are contiguous runs of m_numRows elements).
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
    // For performance reason avoid extra validation in release.
    // col == 0 is allowed even on an empty matrix (0 columns).
    assert(col == 0 || col < GetNumCols());
    return col * m_numRows; // matrix in column-wise storage
}
// Returns the flat buffer offset of element (row, col) in column-major storage.
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
    // Release builds skip bounds validation for speed; debug builds assert.
    assert(row < m_numRows);
    // column start plus row offset within the column
    size_t columnBase = LocateColumn(col);
    return columnBase + row;
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
    // Add the scalar to every element, in place.
    AssignSumOf(alpha, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
    // Produce a new matrix holding [this] + alpha (elementwise); [this] is untouched.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignSumOf(alpha, *this);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // [this] = alpha + a, elementwise. Supports a == *this (in-place).
    if (a.IsEmpty())
        LogicError("AssignSumOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = alpha + a(i, j);
            us(i + 1, j) = alpha + a(i + 1, j);
            us(i + 2, j) = alpha + a(i + 2, j);
            us(i + 3, j) = alpha + a(i + 3, j);
        }
        for (; i < m; i++)
            us(i, j) = alpha + a(i, j);
    }
    return *this;
}
//if [this] and a have same dimension then [this]=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
//if a is a scalar, add it to all elements.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
    // All broadcasting/shape handling is delegated to ScaleAndAdd.
    ScaleAndAdd(1, a, *this);
    return *this;
}
//if [this] and a have same dimension then OUTPUT=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
    // A 1x1 operand acts as a scalar; otherwise copy one side and reuse operator+=.
    if (GetNumElements() == 1)
    {
        CPUMatrix<ElemType> result(a);
        result += (*this)(0, 0);
        return result;
    }
    if (a.GetNumElements() == 1)
    {
        CPUMatrix<ElemType> result(*this);
        result += a(0, 0);
        return result;
    }
    // copy overhead accepted here so the in-place operator does the work
    CPUMatrix<ElemType> result(*this);
    result += a;
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // [this] = a + b. When a is 1x1 it acts as a scalar, so start from b instead;
    // otherwise start from a and add b (operator+= handles broadcasting).
    const bool aIsScalar = (a.GetNumElements() == 1);
    SetValue(aIsScalar ? b : a);
    (*this) += (aIsScalar ? a : b);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
    // Subtract the scalar from every element, in place.
    AssignDifferenceOf(*this, alpha);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
    // Produce a new matrix holding [this] - alpha (elementwise); [this] is untouched.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignDifferenceOf(*this, alpha);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // [this] = alpha - a, elementwise. Supports a == *this (in-place).
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = alpha - a(i, j);
            us(i + 1, j) = alpha - a(i + 1, j);
            us(i + 2, j) = alpha - a(i + 2, j);
            us(i + 3, j) = alpha - a(i + 3, j);
        }
        for (; i < m; i++)
            us(i, j) = alpha - a(i, j);
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
    // [this] = a - alpha, elementwise. Supports a == *this (in-place).
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = a(i, j) - alpha;
            us(i + 1, j) = a(i + 1, j) - alpha;
            us(i + 2, j) = a(i + 2, j) - alpha;
            us(i + 3, j) = a(i + 3, j) - alpha;
        }
        for (; i < m; i++)
            us(i, j) = a(i, j) - alpha;
    }
    return *this;
}
//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
    // Subtraction is an add with weight -1; ScaleAndAdd handles broadcasting.
    ScaleAndAdd(-1, a, *this);
    return *this;
}
//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
    // Copy [this] and subtract in place; copy overhead accepted to reuse operator-=.
    CPUMatrix<ElemType> result(*this);
    result -= a;
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // [this] = a - b. When a aliases *this, skip the copy and subtract directly.
    if (this != &a)
    {
        RequireSize(a.GetNumRows(), a.GetNumCols());
        SetValue(a);
    }
    return (*this) -= b;
}
// In-place scalar multiplication: [this] *= alpha (delegates to static Scale).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
    Scale(alpha, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
    // Produce a new matrix holding alpha * [this]; [this] is untouched.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    Scale(alpha, *this, result);
    return result;
}
// [this] = alpha * a (elementwise scalar scaling, delegates to static Scale).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    Scale(alpha, a, *this);
    return *this;
}
// [this]=a*b
// 1x1 operands are treated as scalars; otherwise delegates to Multiply.
// NOTE(review): in the scalar paths below, when the corresponding transpose flag is
// false, [this] is scaled WITHOUT first being assigned the other operand — this
// appears to assume [this] already holds that operand; confirm against callers.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
    if (a.GetNumElements() == 1)
    {
        if (transposeB)
            AssignTransposeOf(b);
        (*this) *= a(0, 0);
    }
    else if (b.GetNumElements() == 1)
    {
        if (transposeA)
            AssignTransposeOf(a);
        (*this) *= b(0, 0);
    }
    else
        Multiply(a, transposeA, b, transposeB, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
    // Matrix product; a 1x1 operand on either side degenerates to scalar scaling.
    auto& us = *this;
    CPUMatrix<ElemType> result;
    if (GetNumElements() == 1)
        result.AssignProductOf(us(0, 0), a);
    else if (a.GetNumElements() == 1)
        result.AssignProductOf(a(0, 0), us);
    else
        Multiply(*this, a, result);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
    // Scalar division implemented as multiplication by the reciprocal.
    const ElemType inverse = 1 / alpha;
    (*this) *= inverse;
    return (*this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
    // Scalar division implemented as multiplication by the reciprocal.
    const ElemType inverse = 1 / alpha;
    return (*this) * inverse;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
    // Raise every element to the power alpha, in place.
    ElementWisePower(alpha, *this, *this);
    return *this;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
    // Produce a new matrix with every element raised to the power alpha.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    ElementWisePower(alpha, *this, result);
    return result;
}
// [this] = a .^ power (elementwise power, delegates to static ElementWisePower).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
    ElementWisePower(power, a, *this);
    return *this;
}
//[this]=[this] .* a (we cannot override operator .* in c++)
// In-place elementwise (Hadamard) product with a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    return AssignElementProductOf(*this, a);
}
//[this]=[this] ./ a (we cannot override operator ./ in c++)
// In-place elementwise division by a (divisor is clipped; see AssignElementDivisionOf).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
    return AssignElementDivisionOf(*this, a);
}
//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Elementwise (Hadamard) product; a and b must have identical shapes.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = a(i, j) * b(i, j);
            us(i + 1, j) = a(i + 1, j) * b(i + 1, j);
            us(i + 2, j) = a(i + 2, j) * b(i + 2, j);
            us(i + 3, j) = a(i + 3, j) * b(i + 3, j);
        }
        for (; i < m; i++)
            us(i, j) = a(i, j) * b(i, j);
    }
    return *this;
}
//[this] +=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Accumulate the elementwise product of a and b into [this];
    // all three matrices must have identical shapes.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AddElementProductOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");
    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) += a(i, j) * b(i, j);
            us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
            us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
            us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
        }
        for (; i < m; i++)
            us(i, j) += a(i, j) * b(i, j);
    }
    return *this;
}
//[this]=a ./ b
// Elementwise division with the divisor clipped away from zero: any b(i,j) in
// (-smallValue, smallValue) is replaced by +/-smallValue (sign-preserving) so the
// result stays finite.
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementDivisionOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        ElemType v = b(i, j);
        if (v >= 0 && v < smallValue)
            us(i, j) = a(i, j) / smallValue;
        else if (v < 0 && v > -smallValue)
            us(i, j) = a(i, j) / (-smallValue);
        else
            us(i, j) = a(i, j) / v;
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    // Scale every column of [this] elementwise by the column vector a (rows x 1).
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementMultiplyWith: Matrix is empty.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) *= a(i, 0);
            us(i + 1, j) *= a(i + 1, 0);
            us(i + 2, j) *= a(i + 2, 0);
            us(i + 3, j) *= a(i + 3, 0);
        }
        for (; i < m; i++)
            us(i, j) *= a(i, 0);
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    // Scale every row of [this] elementwise by the row vector a (1 x cols):
    // column j of [this] is multiplied by the scalar a(0, j).
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementMultiplyWith: Matrix is empty.");
    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        const ElemType scale = a(0, j); // hoisted per-column factor
        long i = 0;
        for (; i + 3 < m; i += 4)
        {
            us(i, j) *= scale;
            us(i + 1, j) *= scale;
            us(i + 2, j) *= scale;
            us(i + 3, j) *= scale;
        }
        for (; i < m; i++)
            us(i, j) *= scale;
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
    // Divide every row of [this] elementwise by the row vector a (1 x cols).
    // Divisors near zero are clipped to +/-EPS_IN_INVERSE (sign-preserving).
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementDivideBy: Matrix is empty.");
    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        ElemType divisor = a(0, j);
        if (divisor >= 0 && divisor < EPS_IN_INVERSE)
            divisor = EPS_IN_INVERSE;
        else if (divisor < 0 && divisor > -EPS_IN_INVERSE)
            divisor = (-EPS_IN_INVERSE);
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) /= divisor;
            us(i + 1, j) /= divisor;
            us(i + 2, j) /= divisor;
            us(i + 3, j) /= divisor;
        }
        for (; i < m; i++)
            us(i, j) /= divisor;
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
    // Divide every column of [this] elementwise by the column vector a (rows x 1).
    // Divisors near zero are clipped to +/-smallValue (sign-preserving).
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementDivideBy: Matrix is empty.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        for (long i = 0; i < m; i++)
        {
            ElemType divisor = a(i, 0);
            if (divisor >= 0 && divisor < smallValue)
                divisor = smallValue;
            else if (divisor < 0 && divisor > -smallValue)
                divisor = -smallValue;
            us(i, j) /= divisor;
        }
    }
    return *this;
}
//[this]=1 ./ a
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
    // In-place elementwise reciprocal (with near-zero clipping; see AssignElementInverseOf).
    AssignElementInverseOf(*this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
    // [this] = 1 ./ a, elementwise, with values near zero clipped to
    // +/-smallValue (sign-preserving) so the reciprocal stays finite.
    ElemType smallValue = EPS_IN_INVERSE;
    if (a.IsEmpty())
        LogicError("AssignElementInverseOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        const ElemType v = a(i, j);
        if (v >= 0 && v < smallValue)
            us(i, j) = 1 / smallValue;
        else if (v < 0 && v > -smallValue)
            us(i, j) = 1 / (-smallValue);
        else
            us(i, j) = 1 / v;
    }
    return *this;
}
//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
    // In-place elementwise logistic sigmoid.
    AssignSigmoidOf(*this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
    // [this] = sigmoid(a), elementwise, using the numerically stable split:
    // for x >= 0 use 1/(1+e^-x); for x < 0 use e^x/(1+e^x) so exp never overflows.
    if (a.IsEmpty())
        LogicError("AssignSigmoidOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        const ElemType x = a(i, j);
        if (x >= 0)
        {
            us(i, j) = 1 / (1 + exp(-x));
        }
        else
        {
            const ElemType e = exp(x);
            us(i, j) = e / (1 + e);
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
    // In-place ReLU derivative: each element becomes 1 if positive, else 0.
    AssignLinearRectifierDerivativeOf(*this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
    // [this] = ReLU'(a): 1 where a > 0, otherwise 0 (elementwise).
    if (a.IsEmpty())
        LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f;
        }
        for (; i < m; i++)
            us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
    // In-place sigmoid derivative; input is assumed to already be sigmoid output.
    AssignSigmoidDerivativeOf(*this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
    // [this] = a .* (1 - a), elementwise — the sigmoid derivative expressed in
    // terms of the sigmoid's OUTPUT values (a is expected to hold sigmoid(x)).
    if (a.IsEmpty())
        LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            const ElemType s0 = a(i, j);
            const ElemType s1 = a(i + 1, j);
            const ElemType s2 = a(i + 2, j);
            const ElemType s3 = a(i + 3, j);
            us(i, j) = s0 * (1 - s0);
            us(i + 1, j) = s1 * (1 - s1);
            us(i + 2, j) = s2 * (1 - s2);
            us(i + 3, j) = s3 * (1 - s3);
        }
        for (; i < m; i++)
        {
            const ElemType s = a(i, j);
            us(i, j) = s * (1 - s);
        }
    }
    return *this;
}
//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
    // In-place elementwise hyperbolic tangent.
    AssignTanhOf(*this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
    // [this] = tanh(a), elementwise. Supports a == *this (in-place).
    if (a.IsEmpty())
        LogicError("AssignTanhOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        long i = 0;
        // process four rows per iteration, then finish the tail
        for (; i + 3 < m; i += 4)
        {
            us(i, j) = tanh(a(i, j));
            us(i + 1, j) = tanh(a(i + 1, j));
            us(i + 2, j) = tanh(a(i + 2, j));
            us(i + 3, j) = tanh(a(i + 3, j));
        }
        for (; i < m; i++)
            us(i, j) = tanh(a(i, j));
    }
    return *this;
}
//[this]=logsoftmax([this]) along columns (isColWise) or rows
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
    // In-place log-softmax over each column (or each row when !isColWise).
    AssignLogSoftmaxOf(*this, isColWise);
    return *this;
}
// [this] = log(softmax(a)) computed per column (isColWise) or per row.
// Uses the max-subtraction trick so exp never overflows:
//   logsoftmax(x) = (x - max) - log(sum(exp(x - max)))
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    if (isColWise)
    {
#pragma omp parallel for
        foreach_column (j, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(0, j);
            foreach_row (i, a)
                maxV = std::max(maxV, a(i, j));
            // shift, exponentiate, and accumulate the normalizer in one pass
            ElemType sum = 0;
            foreach_row (i, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            foreach_row (i, us)
                us(i, j) -= sum;
        }
    }
    else
    {
#pragma omp parallel for
        foreach_row (i, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(i, 0);
            foreach_column (j, a)
                maxV = std::max(maxV, a(i, j));
            ElemType sum = 0;
            foreach_column (j, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            foreach_column (j, us)
                us(i, j) -= sum;
        }
    }
    return *this;
}
//[this] = hardmax([this]): the maximal element per column/row becomes 1, all others 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignHardmaxOf(self, isColWise);
}
// [this] = hardmax(a): per column (isColWise) or per row, the FIRST occurrence of
// the maximal element is set to 1 and all other positions to 0 (ties keep the
// earliest index because the comparison is strict '<'). Resizes unless in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignHardmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// find the (first) argmax of this column
ElemType maxV = a(0, j);
long maxI = 0;
foreach_row (i, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxI = i;
}
}
// one-hot write: 1 at the argmax, 0 elsewhere
foreach_row (i, us)
us(i, j) = (i == maxI) ? 1.0f : 0.0f;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// find the (first) argmax of this row
ElemType maxV = a(i, 0);
long maxJ = 0;
foreach_column (j, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxJ = j;
}
}
foreach_column (j, us)
us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
}
}
return *this;
}
//[this] = sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
// Delegates to AssignSqrtOf on itself (which clamps negative inputs to 0 before sqrt).
auto& self = *this;
return self.AssignSqrtOf(self);
}
//to prevent negative values caused by floating operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
// [this] = sqrt(max(0, a)), element-wise. Resizes *this unless called in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSqrtOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
// Parallelize across columns; 4-way unrolled over rows.
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
}
// remaining rows when m is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
}
}
return *this;
}
//[this] = exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignExpOf(self);
}
// [this] = exp(a), element-wise. Resizes *this unless called in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignExpOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
// Parallelize across columns; 4-way unrolled over rows.
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = exp(a(i, j));
us(i + 1, j) = exp(a(i + 1, j));
us(i + 2, j) = exp(a(i + 2, j));
us(i + 3, j) = exp(a(i + 3, j));
}
// remaining rows when m is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
us(i, j) = exp(a(i, j));
}
}
return *this;
}
//[this] = abs([this]) element wise (NOTE: original comment incorrectly said "exp")
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
// In-place wrapper around the assigning variant.
return AssignAbsOf(*this);
}
// [this] = abs(a), element-wise. Resizes *this unless called in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAbsOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
// Parallelize across columns; 4-way unrolled over rows.
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = abs(a(i, j));
us(i + 1, j) = abs(a(i + 1, j));
us(i + 2, j) = abs(a(i + 2, j));
us(i + 3, j) = abs(a(i + 3, j));
}
// remaining rows when m is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
us(i, j) = abs(a(i, j));
}
}
return *this;
}
//[this] = log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignLogOf(self);
}
//[this] = log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignLog10Of(self);
}
// [this] = log(a), element-wise. Inputs below EPS_IN_LOG are clamped to the
// precomputed LOG_OF_EPS_IN_LOG instead of producing -inf/NaN.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v < EPS_IN_LOG)
{
// clamp tiny/non-positive inputs to a finite floor value
us(i, j) = LOG_OF_EPS_IN_LOG;
}
else
us(i, j) = log(v);
}
return *this;
}
// [this] = log10(a), element-wise. Non-positive inputs are a hard error; tiny
// positive inputs (< EPS_IN_LOG) are clamped to LOG10_OF_EPS_IN_LOG to avoid -inf.
// Fix: both error messages previously misreported the function as "AssignLogOf".
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLog10Of: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v <= 0)
LogicError("AssignLog10Of: Log10 can only be applied to numbers larger than 0.");
else if (v < EPS_IN_LOG)
{
// clamp tiny positive inputs to a finite floor value
us(i, j) = LOG10_OF_EPS_IN_LOG;
}
else
us(i, j) = log10(v);
}
return *this;
}
//[this] = cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignCosineOf(self);
}
// [this] = cos(a), element-wise. Resizes *this to a's dimensions unless in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
us(i, j) = cos(a(i, j));
}
return *this;
}
//[this] = -sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
// In-place form: reuse the assigning variant with this matrix as its own source.
auto& self = *this;
return self.AssignNegativeSineOf(self);
}
// [this] = -sin(a), element-wise (the derivative of cos). Resizes unless in-place.
// Fix: the empty-matrix error previously misreported the function as "AssignCosineOf".
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignNegativeSineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = -sin(v);
}
return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
// In-place lower clamp: every element below 'threshold' is raised to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateBottom: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
// Parallelize across columns; 4-way unrolled over rows.
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
if (us(i + 1, j) < threshold)
us(i + 1, j) = threshold;
if (us(i + 2, j) < threshold)
us(i + 2, j) = threshold;
if (us(i + 3, j) < threshold)
us(i + 3, j) = threshold;
}
// remaining rows when m is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
}
}
return *this;
}
// Clamp every element into the symmetric range [-|threshold|, +|threshold|], in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
auto& us = *this;
// Work with the magnitude so a negative 'threshold' behaves the same as a positive one.
const ElemType upperBound = abs(threshold);
const ElemType lowerBound = -upperBound;
long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
// Each element is clamped independently, so a simple column-parallel double loop suffices.
#pragma omp parallel for
for (long col = 0; col < numCols; col++)
{
for (long row = 0; row < numRows; row++)
{
if (us(row, col) > upperBound)
us(row, col) = upperBound;
else if (us(row, col) < lowerBound)
us(row, col) = lowerBound;
}
}
return *this;
}
//x = x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
// In-place soft-thresholding (shrinkage) over the flat element buffer.
// Fix: the empty-matrix error previously misreported the function as "InplaceTruncate".
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceSoftThreshold: Matrix is empty.");
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
// Parallel over the flat buffer, 4-way unrolled; tail handled below.
#pragma omp parallel for
for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
if (bufPtr[i + 1] > threshold)
bufPtr[i + 1] -= threshold;
else if (bufPtr[i + 1] < -threshold)
bufPtr[i + 1] += threshold;
else
bufPtr[i + 1] = 0;
if (bufPtr[i + 2] > threshold)
bufPtr[i + 2] -= threshold;
else if (bufPtr[i + 2] < -threshold)
bufPtr[i + 2] += threshold;
else
bufPtr[i + 2] = 0;
if (bufPtr[i + 3] > threshold)
bufPtr[i + 3] -= threshold;
else if (bufPtr[i + 3] < -threshold)
bufPtr[i + 3] += threshold;
else
bufPtr[i + 3] = 0;
}
// remaining elements when the count is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
}
return *this;
}
//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
// [this] = element-wise lower clamp of 'a'; resizes *this unless called in-place.
if (a.IsEmpty())
LogicError("AssignTruncateBottomOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = (v < threshold) ? threshold : v;
}
return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
// Cap every element at 'threshold', in place.
if (IsEmpty())
LogicError("InplaceTruncateTop: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (threshold < us(i, j))
us(i, j) = threshold;
}
return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
// [this] = element-wise upper clamp of 'a'; resizes *this unless called in-place.
if (a.IsEmpty())
LogicError("AssignTruncateTopOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = (v > threshold) ? threshold : v;
}
return *this;
}
// Zero out every element whose magnitude is strictly below 'threshold', in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
if (IsEmpty())
LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
const ElemType v = us(i, j);
if (abs(v) < threshold)
us(i, j) = 0;
}
return *this;
}
//sum of all abs(elements)
// Returns the L1 norm of the whole matrix via CBLAS asum; the sizeof-based branch
// dispatches to the double or float variant at compile time for this template
// instantiation (the reinterpret_cast is a no-op on the taken branch).
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
if (IsEmpty())
LogicError("SumOfAbsElements: Matrix is empty.");
if (sizeof(ElemType) == sizeof(double))
{
return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
}
else
{
#pragma warning(suppress : 4244)
return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
}
}
//sum of all elements
// Parallel reduction over the flat buffer, 4-way unrolled; the tail (when the
// element count is not a multiple of 4) is summed serially afterwards.
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
if (IsEmpty())
LogicError("SumOfElements: Matrix is empty.");
ElemType sum = 0;
long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : sum)
for (long i = 0; i < (m & ~3); i += 4)
{
sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3];
}
// remaining elements, summed serially
for (long i = m & ~3; i < m; i++)
{
sum += bufPtr[i];
}
return sum;
}
// Make *this a 1x1 matrix holding the sum of all elements of 'a'.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOfElements: Matrix a is empty.");
RequireSize(1, 1);
(*this)(0, 0) = a.SumOfElements();
return *this;
}
// Expand each element of 'a' (interpreted as a class index) into a one-hot block of
// length shape[axis]; item_size is the stride contributed by the dimensions before
// 'axis'. Out-of-range indices leave their block all-zero.
// Fix: the OpenMP loop compared the signed index 'i' against the unsigned
// size_t GetNumElements() on every iteration (signed/unsigned mismatch);
// the bound is now hoisted into a signed long.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
if (a.IsEmpty())
LogicError("AssignOneHot: Matrix a is empty.");
if (axis >= shape.size())
LogicError("AssignOneHot: axis is not correct");
// product of the dimensions preceding 'axis'
size_t item_size = 1;
for (size_t i = 0; i < shape.size() && i < axis; i++)
item_size *= shape[i];
size_t num_class = shape[axis];
auto& us = *this;
auto nCols = a.GetNumCols();
auto nRows = num_class * a.GetNumRows();
us.RequireSize(nRows, nCols);
ElemType* bufPtr = Data();
ElemType* aBufPtr = a.Data();
// zero everything first; only the hot positions are written below
memset(bufPtr, 0, sizeof(ElemType) * nRows * nCols);
const long numElements = (long) a.GetNumElements(); // OpenMP needs a signed index
#pragma omp parallel for
for (long i = 0; i < numElements; i++)
{
// only valid class indices in [0, num_class) produce a 1
if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class)
{
size_t block_id = i / item_size;
size_t item_id = i % item_size;
bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1;
}
}
return *this;
}
// Element-wise approximate equality within 'threshold'; delegates to the static AreEqual.
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
bool equal = AreEqual(*this, a, threshold);
return equal;
}
// c = per-column (isColWise) or per-row sums of 'a'.
// Fix: the accumulator 'v' is declared inside the parallel loop body and is therefore
// thread-private; the '#pragma omp atomic' on each accumulation was unnecessary and
// only added synchronization overhead. Removed.
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty())
LogicError("VectorSum: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, a)
{
ElemType v = 0; // thread-private: no atomic needed
foreach_row (i, a)
{
v += a(i, j);
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, a)
{
ElemType v = 0; // thread-private: no atomic needed
foreach_column (j, a)
{
v += a(i, j);
}
c(i, 0) = v;
}
}
}
// c = per-column (isColWise) or per-row L1 norms (sum of absolute values) of *this.
// Fix: the accumulator 'v' is declared inside the parallel loop body and is therefore
// thread-private; the '#pragma omp atomic' on each accumulation was unnecessary and
// only added synchronization overhead. Removed.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm1: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0; // thread-private: no atomic needed
foreach_row (i, us)
{
v += abs(us(i, j));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0; // thread-private: no atomic needed
foreach_column (j, us)
{
v += abs(us(i, j));
}
c(i, 0) = v;
}
}
}
// Store the per-column (or per-row) L1 norms of 'a' into *this and return it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
auto& result = *this;
a.VectorNorm1(result, isColWise);
return result;
}
// c = per-column (isColWise) or per-row Euclidean (L2) norms of *this, computed with
// CBLAS nrm2. The sizeof branch dispatches to the double/float variant for this
// template instantiation. Column-wise uses stride 1 from each column start
// (LocateColumn); row-wise uses stride m starting at element i.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm2: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
ElemType* bufPtr = us.Data();
if (isColWise) // col-wise
{
c.RequireSize(1, n);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
}
}
}
else
{
c.RequireSize(m, 1);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
// row i: n elements, spaced m apart in the buffer
c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
}
}
}
}
// Store the per-column (or per-row) L2 norms of 'a' into *this and return it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
auto& result = *this;
a.VectorNorm2(result, isColWise);
return result;
}
// c = per-column (isColWise) or per-row infinity norms (max absolute value) of *this.
// NOTE(review): both '#pragma omp parallel for' lines are commented out, so this runs
// serially — presumably deliberate, but worth confirming.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNormInf: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
// #pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
v = std::max(v, abs(us(i, j)));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
// #pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
v = std::max(v, abs(us(i, j)));
}
c(i, 0) = v;
}
}
}
// Store the per-column (or per-row) infinity norms of 'a' into *this and return it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
auto& result = *this;
a.VectorNormInf(result, isColWise);
return result;
}
// Store the column-wise (or row-wise) inner products of 'a' and 'b' into *this.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
auto& result = *this;
InnerProduct(a, b, result, isColWise);
return result;
}
//column-wise crossproduct
// [this] = Khatri-Rao (column-wise Kronecker) product: for each column k, the output
// column is the outer product of a's column and b's column, flattened so that
// out(i + j*rowsA, k) = a(i, k) * b(j, k). Result has rowsA*rowsB rows.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
for (long k = 0; k < cols; k++)
{
// jj walks the output column top-to-bottom: b's row index is the outer loop
long jj = 0;
for (long j = 0; j < rowsB; j++)
{
for (long i = 0; i < rowsA; i++)
{
(*this)(jj++, k) = a(i, k) * b(j, k);
}
}
}
return *this;
}
//column-wise reshaped product. Used to compute KhatriRaoProduct Gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames).
// the output is a (K1, frames) matrix
// if each column of a is tranposed, each (K1, K2)^T times each column of b(K1, frames) and output is (K2, frames)
// NOTE: this ADDS into *this (+=); *this must already have the right size (rowsC x cols).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddColumnReshapeProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
if (rowsA % rowsB != 0)
InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
long rowsC = rowsA / rowsB;
if (rowsC != GetNumRows() || cols != GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
auto& us = *this;
if (transposeAColumn)
{
// find nrows and ncols of the reshaped a
long nrows = rowsB;
long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
// k walks a's column linearly; rows/cols of the reshaped matrix are swapped
size_t k = 0;
for (size_t j = 0; j < ncols; j++) // row and col is transposed
{
// dot product of the reshaped column j of a with b's column
ElemType v = 0;
for (size_t i = 0; i < nrows; i++)
{
v += a(k, t) * b(i, t);
k++;
}
us(j, t) += v;
}
}
}
else
{
size_t ncols = rowsB;
size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
// accumulate a's reshaped columns scaled by b's entries into us's column
size_t k = 0;
for (size_t j = 0; j < ncols; j++)
{
for (size_t i = 0; i < nrows; i++)
{
us(i, t) += a(k, t) * b(j, t);
k++;
}
}
}
}
return *this;
}
// [this] += alpha * a, delegating to the static axpy-style helper ScaleAndAdd.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
auto& result = *this;
ScaleAndAdd(alpha, a, result);
return result;
}
// Frobenius norm: sqrt of the sum of squared elements, computed as a 4-way-unrolled
// OpenMP reduction over the flat buffer, with a serial tail for the remainder.
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
if (IsEmpty())
LogicError("FrobeniusNorm: Matrix is empty.");
ElemType v = 0;
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : v)
for (long i = 0; i < (m & ~3); i += 4)
{
v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3];
}
// remaining elements when the count is not a multiple of 4
for (long i = m & ~3; i < m; i++)
{
v += bufPtr[i] * bufPtr[i];
}
return sqrt(v);
}
// Make *this a 1x1 matrix holding the Frobenius norm of 'a'.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
RequireSize(1, 1);
(*this)(0, 0) = a.FrobeniusNorm();
return *this;
}
// Infinity norm over the whole matrix: max absolute element value.
// NOTE(review): the critical section runs for EVERY element, which effectively
// serializes the parallel loop — correct, but the pragma buys nothing here.
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
if (IsEmpty())
LogicError("MatrixNormInf: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
#pragma omp critical
{
// 'v' is shared across threads; the critical section protects the read-modify-write
v = std::max(v, abs(us(i, j)));
}
}
return v;
}
// L0 "norm": count of non-zero elements, returned as ElemType.
// The critical section only runs for non-zero elements, protecting the shared counter.
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
if (IsEmpty())
LogicError("MatrixNorm0: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) != 0)
{
#pragma omp critical
{
++v;
}
}
}
return v;
}
// Entry-wise L1 norm over the whole matrix: sum of absolute values,
// accumulated with an OpenMP reduction.
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
if (IsEmpty())
LogicError("MatrixNorm1: Matrix is empty.");
auto& us = *this;
ElemType sum = 0;
#pragma omp parallel for reduction(+ : sum)
foreach_coord (i, j, us)
{
sum += abs(us(i, j));
}
return sum;
}
// [this] = sign(a): -1 / 0 / +1 per element; NaN inputs are copied through unchanged.
// Resizes *this unless called in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) = (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v; // propagate NaN
}
}
return us;
}
// [this] += sign(a): adds -1 / 0 / +1 per element; a NaN input OVERWRITES (not adds to)
// the destination element. Resizes *this unless called in-place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AddSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v; // NaN replaces rather than accumulates
}
}
return us;
}
//I decided to use CPUMatrix<ElemType>& maxIndexes instead of integer vector because the result may be used to do additional calculation
// Per-column (isColWise) or per-row max: fills maxValues with the top values and
// maxIndexes with their row/column indices (stored as ElemType).
// topK > 1 is only supported column-wise and uses a partial sort per column.
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
if (IsEmpty())
LogicError("VectorMax: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
if (topK > m)
InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
maxValues.RequireSize(topK, n);
maxIndexes.RequireSize(topK, n);
if (topK == 1)
{
// simple argmax per column; first occurrence wins on ties (strict '<')
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v < us(i, j))
{
index = i;
v = us(i, j);
}
}
maxValues(0, j) = v;
maxIndexes(0, j) = (ElemType) index;
}
}
else
{
// topK > 1: reuse one index permutation, partially re-sorted per column
std::vector<int> indices(m);
int i = 0;
std::generate(indices.begin(), indices.end(), [&i]
{
return i++;
});
const ElemType* curVal = Data();
ElemType* curIdx = maxIndexes.Data();
ElemType* curMax = maxValues.Data();
// walk columns via raw pointers; column stride is m, output stride is topK
for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
{
// Partial sort, descending order: after this, the first topK indices
// reference the topK largest values (unordered among themselves).
std::nth_element(indices.begin(), indices.begin() + topK, indices.end(),
[curVal](const int& a, const int& b)
{
return curVal[a] > curVal[b];
});
// REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
for (int i2 = 0; i2 < topK; i2++)
{
curIdx[i2] = static_cast<ElemType>(indices[i2]);
curMax[i2] = curVal[indices[i2]];
}
}
}
}
else
{
if (topK > 1)
RuntimeError("Row-wise TopK max is not supported.");
maxValues.RequireSize(m, 1);
maxIndexes.RequireSize(m, 1);
// argmax per row; first occurrence wins on ties
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v < us(i, j))
{
index = j;
v = us(i, j);
}
}
maxValues(i, 0) = v;
maxIndexes(i, 0) = (ElemType) index;
}
}
}
// Per-column (isColWise) or per-row min: fills minValues with the smallest value and
// minIndexes with its row/column index (stored as ElemType). First occurrence wins
// on ties (strict '>'). No topK variant, unlike VectorMax.
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorMin: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
minValues.RequireSize(1, n);
minIndexes.RequireSize(1, n);
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v > us(i, j))
{
index = i;
v = us(i, j);
}
}
minValues(0, j) = v;
minIndexes(0, j) = (ElemType) index;
}
}
else
{
minValues.RequireSize(m, 1);
minIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v > us(i, j))
{
index = j;
v = us(i, j);
}
}
minValues(i, 0) = v;
minIndexes(i, 0) = (ElemType) index;
}
}
}
// Makes *this a 1x1 matrix holding a difference count between a and b:
// - searchInCol == false: number of coordinates where a(i,j) != b(i,j)
// - searchInCol == true: number of columns of b whose entries do NOT contain a(0, col)
// (only a's first row is consulted in this mode)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
if (a.GetNumCols() != b.GetNumCols())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns.");
if (!searchInCol && a.GetNumRows() != b.GetNumRows())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows.");
ElemType n = 0;
if (!searchInCol)
{
foreach_coord (i, j, a)
{
n += (a(i, j) != b(i, j));
}
}
else
{
size_t crow = b.GetNumRows();
const ElemType* curCol = b.Data();
for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow)
{
// linear scan of b's column for the value a(0, icol)
auto res = std::find(curCol, curCol + crow, a(0, icol));
if (res == curCol + crow)
n++;
}
}
RequireSize(1, 1); // result should be one element
(*this)(0, 0) = n;
return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
struct PrintRange
{
// print from begin to skipBegin, then from skipEnd to end
// skipBegin = end if no split
size_t begin;
size_t skipBegin;
size_t skipEnd;
size_t end;
bool IsEmpty() const { return end <= begin; }
// examples:
// * 3..10: plain inclusive range
// * -3..-3: include end-3..end and 0..3 (negative bounds select the two corners)
PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
{
if (first >= 0 && last >= 0)
{
// plain range [first, last], clipped to total; no skipped middle
begin = (size_t)first;
end = (size_t)last + 1;
if (end > total) // allow INT_MAX, meaning to end
end = total;
skipBegin = end;
skipEnd = end;
}
else if (first < 0 && last < 0)
{
// corners: first |last| items, then the final |first| items, skipping the middle
begin = 0;
skipBegin = (size_t)(-last);
skipEnd = (size_t)(total + first);
if (skipEnd <= skipBegin) // ranges overlap: print everything, no skip
skipBegin = skipEnd = total;
end = total;
}
else // if other combinations are ever of interest then implement them here
LogicError("Print: Bounds must be either both positive or both negative.");
}
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
// Writes a formatted dump of the selected sub-range to stderr, with "..." markers
// around any skipped rows/columns. Empty matrices/ranges print "(empty)".
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
fprintf(stderr, "\n###### ");
if (matrixName != nullptr)
fprintf(stderr, "%s ", matrixName);
fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
// only show the sub-range suffix when not printing the whole matrix
if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
fprintf(stderr, " ######\n\n");
if (IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
PrintRange rowRange(rowFirst, rowLast, GetNumRows());
PrintRange colRange(colFirst, colLast, GetNumCols());
if (rowRange.IsEmpty() || colRange.IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
const auto& us = *this;
if (rowRange.begin > 0) // rows skipped before the range
fprintf(stderr, "...\n");
for (size_t i = rowRange.begin; i < rowRange.end; i++)
{
if (i == rowRange.skipBegin) // insert ... between the two blocks if any
{
fprintf(stderr, "...\n");
i = rowRange.skipEnd;
}
if (colRange.begin > 0) // ... at line start
fprintf(stderr, "...\t");
for (size_t j = colRange.begin; j < colRange.end; j++)
{
if (j == colRange.skipBegin) // skipped middle columns
{
fprintf(stderr, "...\t");
j = colRange.skipEnd;
}
fprintf(stderr, "%.10f\t", us(i, j));
}
if (colRange.end < GetNumCols()) // ... at line end
fprintf(stderr, "...");
fprintf(stderr, "\n");
}
if (rowRange.end < GetNumRows()) // rows skipped after the range
fprintf(stderr, "...\n");
}
// Convenience overload: print the whole matrix (full row and column ranges).
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
Print(matrixName, 0, (ptrdiff_t) GetNumRows() - 1, 0, (ptrdiff_t) GetNumCols() - 1);
}
// file I/O
//matrixName is used to verify that correct matrix is read.
// Stub: deserialization is not implemented for CPUMatrix; always raises RuntimeError.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//matrixName is used to verify that correct matrix is read.
// Stub: serialization is not implemented for CPUMatrix; always raises RuntimeError.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
// Packs (im2col-style) each input sample into this matrix so that convolution can be
// expressed as a matrix product: each input element is scattered to every
// (kernel-position, output-location) pair that reads it.
// Resizes *this to [kernelW*kernelH*channels x outputW*outputH*batchSize].
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
                                                                       const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
                                                                       const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
                                                                       const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
                                                                       const bool zeroPadding)
{
    if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
        LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
    const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
    const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
    const size_t inputDim = inputWidth * inputHeight * inputChannels;
    const size_t smallBatchSize = inputSubBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
    RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
    // With padding, elements outside every kernel window stay zero, so clear first.
    if (zeroPadding)
        SetValue((ElemType) 0);
    const long halfKernelWidth = (long) kernelWidth / 2;
    const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
    for (long sample = 0; sample < smallBatchSize; sample++)
    {
        for (long id = 0; id < inputDim; id++)
        {
            // Decompose the flat input row index into (channel, inputRow, inputCol).
            // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
            // IN_ELEM_COLPOS = sample
            const long y = id / inputHeightTimesChannel;   // inputCol
            const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
            const long x = nXC / (long) inputChannels;     // inputRow
            const long c = nXC % (long) inputChannels;     // channel
            // (x0, y0): first output (wrow, wcol) whose kernel window covers this element;
            // (x1, y1): position of this element inside that first kernel window.
            long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
            if (zeroPadding)
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample));   // row : first wrow in which x is in
                x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample);                                                               // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample));  // col : first wcol in which y is in
                y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample);                                                              // first posyInKernel
            }
            else
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample));  // row : first wrow in which x is in
                x1 = (long) (x - x0 * verticalSubsample);                                                            // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y - y0 * horizontalSubsample);                                                          // first posyInKernel
            }
            assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
            // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
            // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
            ElemType currentInputValue = inputSubBatch(id, sample);
            long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
            // Walk all kernel windows containing this element; kernel positions decrease
            // by the stride as the window slides forward.
            for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
            {
                long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
                for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
                {
                    const long packRow = packRowBase + posxInKernel;
                    const long packCol = packColBase + wrow;
                    (*this)(packRow, packCol) = currentInputValue;
                }
                packColBase += (long) outputHeight;
            }
        }
    }
    return *this;
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
// Inverse of AssignPackedConvolutionInput: for each input element, accumulates (adds)
// all packed entries of *this that were produced from it back into inputSubBatch.
// Used on the backward pass (gradient w.r.t. the convolution input).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
                                                                 const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
                                                                 const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
                                                                 const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
                                                                 const bool zeroPadding) const
{
    if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
        LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
    const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
    const size_t inputDim = inputWidth * inputHeight * inputChannels;
    const size_t smallBatchSize = inputSubBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
    const long halfKernelWidth = (long) kernelWidth / 2;
    const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
    for (long sample = 0; sample < smallBatchSize; sample++)
    {
        for (long id = 0; id < inputDim; id++)
        {
            // Decompose the flat input row index into (channel, inputRow, inputCol).
            // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
            // IN_ELEM_COLPOS = sample
            const long y = id / inputHeightTimesChannel;   // inputCol
            const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
            const long x = nXC / (long) inputChannels;     // inputRow
            const long c = nXC % (long) inputChannels;     // channel
            // (x0, y0): first output window covering this element; (x1, y1): its position
            // inside that window (same index math as the packing routine).
            long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
            if (zeroPadding)
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample));  // row : first wrow in which x is in
                x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample);                                                                 // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample));  // col : first wcol in which y is in
                y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample);                                                                // first posyInKernel
            }
            else
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample));  // row : first wrow in which x is in
                x1 = (long) (x - x0 * verticalSubsample);                                                              // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y - y0 * horizontalSubsample);                                                            // first posyInKernel
            }
            assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
            // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
            // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
            // Note: accumulates ON TOP of the existing value in inputSubBatch (gradient add).
            ElemType currentInputValue = inputSubBatch(id, sample);
            long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
            for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
            {
                long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
                for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
                {
                    const long packRow = packRowBase + posxInKernel;
                    const long packCol = packColBase + wrow;
                    currentInputValue += (*this)(packRow, packCol);
                }
                packColBase += (long) outputHeight;
            }
            inputSubBatch(id, sample) = currentInputValue;
        }
    }
    return inputSubBatch;
}
//assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
// Max pooling forward pass (legacy layout): for every output location, takes the max
// over its windowWidth x windowHeight input window and writes it into *this,
// which is resized to [outputSizePerSample x batchSize].
// Fix: removed the minVal accumulator — it was computed for every window element
// but never read, pure dead work inside the hot loop.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                 const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                 const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                 const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    RequireSize(outputSizePerSample, batchSize);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < (long) batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
        {
            // Decompose the flat output row index into (channel, wrow, wcol).
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel
            ElemType maxVal = -FLT_MAX;
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
                {
                    const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels];
                    maxVal = std::max(maxVal, val);
                    rowInInput += (long) channels; // step one row down within the same channel
                }
            }
            (*this)(outputIndexWithinSample, sample) = maxVal;
        }
    }
    return *this;
}
// Max pooling backward pass (legacy layout): for each input element, finds the output
// windows that could have pooled it and ADDS the corresponding output gradient to
// *this wherever the input value equals the pooled output (i.e. it was the max).
// Note: ties (several inputs equal to the max) each receive the full gradient.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
                                                                const size_t channels,
                                                                const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
                                                                const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
                                                                const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    size_t batchSize = inputBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
        {
            // Decompose the flat input row index into (channel, row, col).
            const long y = inputIndexWithinSample / inputHeightTimesChannel;   // col in input
            const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
            const long x = (long) (nXC / channels);                            // row in input
            const long c = (long) (nXC % channels);                            // channel
            // Inclusive range of output windows whose receptive field contains (x, y).
            long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample));   // inclusive start
            long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1);      // inclusive end
            long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample));  // inclusive start
            long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1);    // inclusive end
            ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
            for (long outY = startOutY; outY <= endOutY; outY++)
            {
                for (long outX = startOutX; outX <= endOutX; outX++)
                {
                    long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
                    // Route gradient only if this input was the max of that window.
                    if (inputValue == outputBatch(outputIndex, sample))
                        (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
                }
            }
        }
    }
    return *this;
}
// Average pooling forward pass (legacy layout): each output location receives the mean
// of its windowWidth x windowHeight input window. *this is resized to
// [outputSizePerSample x batchSize].
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                     const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                     const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                     const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    const size_t windowSize = windowWidth * windowHeight;
    RequireSize(outputSizePerSample, batchSize);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
        {
            // Decompose the flat output row index into (channel, wrow, wcol).
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel
            ElemType sum = 0;
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
                {
                    sum += inputBatch(rowInInput, sample);
                    rowInInput += (long) channels; // next row within the same channel
                }
            }
            // Divide by the full window size (does not exclude padding).
            (*this)(outputIndexWithinSample, sample) = sum / windowSize;
        }
    }
    return *this;
}
// Average pooling backward pass (legacy layout): each input element receives the sum of
// (output gradient / windowSize) over all output windows that contain it, ADDED into *this.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
                                                                    const size_t channels,
                                                                    const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
                                                                    const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
                                                                    const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    size_t batchSize = outputGradientBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const long windowSize = (long) (windowWidth * windowHeight);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
        {
            // Decompose the flat input row index into (channel, row, col).
            const long y = inputIndexWithinSample / inputHeightTimesChannel;   // col in input
            const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
            const long x = nXC / (long) channels;                              // row in input
            const long c = nXC % (long) channels;                              // channel
            // Inclusive range of output windows whose receptive field contains (x, y).
            long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample));        // inclusive start
            long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1);    // inclusive end
            long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample));       // inclusive start
            long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1);         // inclusive end
            for (long outY = startOutY; outY <= endOutY; outY++)
            {
                for (long outX = startOutX; outX <= endOutX; outX++)
                {
                    long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
                    (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
                }
            }
        }
    }
    return *this;
}
#pragma endregion Other Helper Functions
// Sparse-geometry convolution forward pass. The geometry is precomputed:
//   mpRowCol(row)  -> base input index for output 'row'
//   mpRowIwht(row) -> base kernel-weight index for output 'row'
//   mpRowRun(row)  -> offset into 'runs', which stores [skip, size, <size offsets>, <size mask flags>]
// For each output element, accumulates kernel[w] * input[col] over the unmasked run entries.
// Parallel over samples (columns); each thread writes disjoint columns of 'output'.
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                             const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());
            ElemType sum = 0;
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // number of leading kernel weights to skip
            int size = runs(i0++, 0); // number of entries in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position (e.g. padding)
                    continue;
                int dcol = runs(i0 + i, 0);  // input offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
            }
            output(row, sample) = sum;
        }
    }
}
// Sparse-geometry convolution backward pass w.r.t. the data: scatters each output
// gradient (*this) times its kernel weight into 'grad' at the corresponding input
// positions. Same runs/mask encoding as ConvolutionForward.
// Parallel over samples; within one sample, writes to grad() may repeat per row but
// stay within the sample's own column, so threads do not race.
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());
            ElemType curGrad = (*this)(row, sample);
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // leading kernel weights to skip
            int size = runs(i0++, 0); // run length
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
            }
        }
    }
}
// Sparse-geometry convolution backward pass w.r.t. the kernel: accumulates
// output-gradient * input into the shared kernelGrad buffer.
// Do NOT parallelize these loops! Every sample/row accumulates into the SAME
// kernelGrad entries, so parallel execution would race on kernelGrad.Data().
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                    const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
    // Do NOT parallelize these loops!
    for (size_t sample = 0; sample < GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < in.GetNumRows());
            ElemType curGrad = (*this)(row, sample);
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // leading kernel weights to skip
            int size = runs(i0++, 0); // run length
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
                kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
            }
        }
    }
}
// Unrolls the convolution input (im2col for the sparse-geometry path) into 'output',
// laid out as [unrollCols per (row, sample)] so convolution becomes a GEMM.
// Masked (padding) positions are skipped and leave output untouched.
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                 const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    size_t batchSize = GetNumCols();
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // leading kernel positions to skip
            int size = runs(i0++, 0); // run length
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                // Destination layout: one unrollCols-wide slab per (row, sample) pair.
                output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
// Unrolls the convolution output (gradient) for the GEMM-based kernel-gradient
// computation. Each output map value is replicated into 'output' once per kernel
// position that touches it, interleaved by output map.
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    if (mpRowCol.GetNumRows() % mapOutCount != 0)
        InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
    size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount;
    size_t batchSize = GetNumCols();
    size_t kernelSize = runs(1, 0);
    if (kernelSize % mapInCount != 0)
        InvalidArgument("kernelSize must be multiple of mapInCount.");
    size_t kernelMapSize = kernelSize / mapInCount; // kernel positions per input map
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // leading kernel positions to skip
            int size = runs(i0++, 0); // run length
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                size_t isrc = row;
                // Destination: slab for the touched input position, offset by this
                // kernel position (modulo per-map kernel size), one slot per output map.
                size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
                for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
                {
                    assert(isrc < GetNumElements());
                    assert(idst + outMap < output.GetNumElements());
                    output.Data()[idst + outMap] = (*this)(isrc, sample);
                }
            }
        }
    }
}
// Unrolls the convolution input for the kernel-gradient GEMM: input values are laid
// out by kernel position (rows of slabs of width mapOutSize*batchSize), so a single
// matrix product against the unrolled output gradient yields the kernel gradient.
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    size_t batchSize = GetNumCols();
    size_t unrollCols = mapOutSize * batchSize; // one slab row per kernel position
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // leading kernel positions to skip
            int size = runs(i0++, 0); // run length
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
                assert(idst < output.GetNumElements());
                output.Data()[idst] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
// Sparse-geometry max pooling forward pass. mpRowIndices(row) points into 'indices',
// which stores [size, <size input offsets>] for output 'row'; the output is the max
// of the referenced input elements. Parallel over samples (disjoint output columns).
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());
            assert(std::numeric_limits<ElemType>::has_infinity);
            // Start from -inf so any real input beats the initial value.
            ElemType res = -std::numeric_limits<ElemType>::infinity();
            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0); // number of window elements for this output
            assert(size > 0);
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                res = std::max(res, (*this)(colBase + dcol, sample));
            }
            output(row, sample) = res;
        }
    }
}
// Sparse-geometry max pooling backward pass: routes each output gradient (*this) to
// the FIRST window element whose input value reaches the pooled max 'm' (>= m, then
// break) — ties send the gradient to one winner only, unlike AddMaxPoolingGradient.
// The atomic guards grad updates because different output rows of the same sample
// can target the same input element.
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
                                             const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                             CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());
            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0); // number of window elements for this output
            assert(size > 0);
            ElemType g = (*this)(row, sample); // incoming gradient
            ElemType m = out(row, sample);     // pooled max from the forward pass
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                if (in(colBase + dcol, sample) >= m)
                {
#pragma omp atomic
                    grad(colBase + dcol, sample) += g;
                    break; // only the first max-matching element receives the gradient
                }
            }
        }
    }
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
// NOTE(review): the nested "#pragma omp parallel for" pragmas only nest if OpenMP
// nested parallelism is enabled; otherwise the inner ones run serially — confirm intent.
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                            const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
                                            CPUMatrix<ElemType>& argmax) const
{
    size_t roiOutputSize = pooledHeight * pooledWidth * channels; // pooled elements per ROI
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
    {
        auto img = ColumnSlice(imgIdx, 1);          // this image's feature map
        auto rois = roiData.ColumnSlice(imgIdx, 1); // this image's ROI list
#pragma omp parallel for
        for (int roiIdx = 0; roiIdx < numRois; roiIdx++)
        {
            // each ROI is 4 elements: (x, y, w, h).
            int base = roiIdx * 4;
            // scaled ROI numbers (relative to original image size)
            // roi points are doubles that represent location relative to image
            // NOTE(review): the (ElemType) casts on these index arguments convert
            // float->size_t implicitly; plain integer indices would be clearer — verify.
            ElemType scX = rois(base, (ElemType)0);
            ElemType scY = rois(base + (ElemType)1, (ElemType)0);
            ElemType scW = rois(base + (ElemType)2, (ElemType)0);
            ElemType scH = rois(base + (ElemType)3, (ElemType)0);
            // compute actual spatial location of the ROI in our featuremap.
            size_t x = (size_t)round(scX * width);
            size_t y = (size_t)round(scY * height);
            ElemType roiW = (ElemType)max(round(scW * width), (ElemType)1);  // clamp to >= 1 pixel
            ElemType roiH = (ElemType)max(round(scH * height), (ElemType)1);
            const ElemType winW = roiW / (ElemType)pooledWidth;  // pooling bin width in pixels
            const ElemType winH = roiH / (ElemType)pooledHeight; // pooling bin height in pixels
            // inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
            // loop over spatial locations in output.
#pragma omp parallel for
            for (int outw = 0; outw < pooledWidth; outw++)
            {
                for (int outh = 0; outh < pooledHeight; outh++)
                {
                    // compute the top left corner of the input
                    // spatial window corresponding to this output unit
                    size_t hstart = (size_t)floor(outh * winH);
                    size_t wstart = (size_t)floor(outw * winW);
                    // compute bottom right corner (not included)
                    size_t hend = (size_t)ceil((outh + 1) * winH);
                    size_t wend = (size_t)ceil((outw + 1) * winW);
                    // offset window based on ROI top left corner.
                    // these indices are into the input slice.
                    hstart = min(max(hstart + y, (size_t)0), height);
                    wstart = min(max(wstart + x, (size_t)0), width);
                    hend = min(max(hend + y, (size_t)0), height);
                    wend = min(max(wend + x, (size_t)0), width);
                    bool isempty = (hend <= hstart) || (wend <= wstart);
                    for (size_t c = 0; c < channels; c++)
                    {
                        // [W x H x C x R x N]; R = ROIs per image
                        size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
                        size_t maxidx = 0;
                        // Empty windows pool to 0; otherwise start below any real value.
                        ElemType maxval = isempty ? (ElemType)0 : -FLT_MAX;
                        size_t baseIdx = c * height * width;
                        for (size_t h = hstart; h < hend; h++)
                        {
                            for (size_t w = wstart; w < wend; w++)
                            {
                                // stored argmax indices are relative to the current channel.
                                size_t dataIdx = w + h * width;
                                if (img(baseIdx + dataIdx, 0) > maxval)
                                {
                                    maxval = img(baseIdx + dataIdx, 0);
                                    maxidx = dataIdx;
                                }
                            }
                        }
                        output(outputIdx, imgIdx) = maxval;
                        argmax(outputIdx, imgIdx) = maxidx;
                    }
                }
            }
        }
    }
}
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
// Fix: the inner loop over image rows previously iterated "h < width"; since 'h' is a
// row index (compared against roiStartH/roiHeight and used as h*width in the flat
// index), the bound must be 'height'. The old bound skipped rows when height > width
// and produced out-of-range row indices when width > height.
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                             const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
                                             CPUMatrix<ElemType>& argmax) const
{
    // loop over images in the batch.
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
    {
        // ROIs for this image. length 4*numRois;
        auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
        // gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
        auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
        auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();
        // loop over spatial locations in the image.
#pragma omp parallel for
        for (int w = 0; w < width; w++)
        {
#pragma omp parallel for
            for (int h = 0; h < height; h++)
            {
                // loop over the ROIs seeing which ones contain this location.
                for (int roiN = 0; roiN < numRois; roiN++)
                {
                    // each ROI is 4 elements: (x, y, w, h).
                    int roiOffset = roiN * 4;
                    // ROI data is relative to original image size
                    size_t roiStartW = (size_t)round(rois[roiOffset + 0] * width);
                    size_t roiStartH = (size_t)round(rois[roiOffset + 1] * height);
                    size_t roiWidth = max((size_t)round(rois[roiOffset + 2] * width), (size_t)1);
                    size_t roiHeight = max((size_t)round(rois[roiOffset + 3] * height), (size_t)1);
                    // skip this ROI if it doesn't contain the current input location.
                    const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
                                        h >= roiStartH && h < roiStartH + roiHeight);
                    if (!inROI)
                        continue;
                    ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight; // bin height in pixels
                    ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;   // bin width in pixels
                    // what pooled nodes in the output for this ROI could have pooled this input location?
                    size_t phstart = (size_t)((h - roiStartH) / winH);
                    size_t pwstart = (size_t)((w - roiStartW) / winW);
                    size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
                    size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));
                    phstart = min(max(phstart, (size_t)0), pooledHeight);
                    phend = min(max(phend, (size_t)0), pooledHeight);
                    pwstart = min(max(pwstart, (size_t)0), pooledWidth);
                    pwend = min(max(pwend, (size_t)0), pooledWidth);
                    for (size_t c = 0; c < channels; c++)
                    {
                        ElemType gradient = 0;
                        // [W x H x C x N]
                        size_t index = w + h*width + c*height*width;
                        // go right up to channel c of the current ROI.
                        size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
                        const ElemType* offsetPoolGrad = pooledGrad + offset;
                        const ElemType* offsetArgmax = argmaxCol + offset;
                        for (size_t ph = phstart; ph < phend; ph++)
                        {
                            for (size_t pw = pwstart; pw < pwend; pw++)
                            {
                                // Only pooled cells that chose exactly this pixel contribute.
                                if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
                                    gradient += offsetPoolGrad[ph * pooledWidth + pw];
                            }
                        }
                        grad(index, imgIdx) = gradient;
                    }
                }
            }
        }
    }
}
// Max unpooling: for each pooled output row of *this, finds which element of the
// corresponding window was the maximum in poolInput (recomputed here, not read from
// stored argmax) and writes this matrix's value to that input position.
template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
                                       const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
                                       CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < input.GetNumRows());
            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0); // number of window elements
            assert(size > 0);
            // Scan the window to locate the argmax of poolInput (first max wins).
            ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
            ElemType prevMax = curMax;
            int imax = 0;
            for (int i = 1; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
                curMax = std::max(curMax, poolInput(colBase + dcol, sample));
                if (curMax > prevMax)
                {
                    prevMax = curMax;
                    imax = i;
                }
            }
            int dcol = indices(i0 + imax, 0);
            assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            input(colBase + dcol, sample) = (*this)(row, sample);
            //int i = (int)poolIn(row, sample);
            //assert(0 <= i && i < size);
            //int dcol = indices(i0 + i, 0);
            //assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            //input(colBase + dcol, sample) = (*this)(row, sample);
        }
    }
}
// Sparse-geometry average pooling forward pass. Sums the window elements referenced
// via mpRowIndices/indices and divides by the element count. When poolIncludePad is
// true, divides by indices(0, 0) (the full window size including padding) instead of
// the actual element count.
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());
            ElemType sum = 0;
            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0); // actual (non-padding) element count
            assert(size > 0);
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                sum += (*this)(colBase + dcol, sample);
            }
            // Note that we divide by size which is the number of actual elements (does not include padding).
            // if poolIncludePad == true, use avg_pool_include_pad
            if (poolIncludePad)
                size = indices(0, 0);
            output(row, sample) = sum / size;
        }
    }
}
// AveragePoolingBackward: distributes each pooled gradient in 'this' uniformly over
// its pooling window, accumulating into 'grad'.
//   mpRowCol / mpRowIndices / indices - same window encoding as the forward pass.
//   poolIncludePad - if true, divide by indices(0, 0) (full kernel size) instead of
//                    the actual window element count, matching the forward divisor.
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());
            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            // 'size' is temporarily overwritten with the divisor; 'tmp' preserves the
            // real element count, which the scatter loop below still needs.
            int tmp = size;
            if (poolIncludePad)
                size = indices(0, 0);
            assert(size > 0);
            ElemType g = (*this)(row, sample) / size;
            size = tmp;
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
// atomic: windows of different output rows overlap, so concurrent samples are safe
// but rows within a sample can hit the same grad cell
#pragma omp atomic
                grad(colBase + dcol, sample) += g;
            }
        }
    }
}
// BatchNormalizationForward (inference only): out = scale * (x - runMean) / sqrt(runVariance + epsilon) + bias.
// Training on CPU is not implemented; any request implying training (expAvgFactor != 0,
// blendFactor != 1, or !inferenceOnly) raises a RuntimeError.
// Spatial mode is detected by scale having fewer rows than 'this': each scale row then
// covers a contiguous band of 'spatialSize' input rows (one statistic per feature map).
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<ElemType>& scale, const CPUMatrix<ElemType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
                                                    CPUMatrix<ElemType>& runMean, CPUMatrix<ElemType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
                                                    CPUMatrix<ElemType>& saveMean, CPUMatrix<ElemType>& saveInvStdDev) const
{
    if (GetNumRows() % scale.GetNumRows() != 0)
        LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix.");
    if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
        RuntimeError("Batch normalization training on CPU is not yet implemented.");
    saveMean.Resize(0, 0); // only doing inference: these two are not produced
    saveInvStdDev.Resize(0, 0);
    bool spatial = GetNumRows() != scale.GetNumRows();
    if (spatial)
    {
        // One (mean, variance, scale, bias) tuple per feature map of 'spatialSize' rows.
        size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
        for (long icol = 0; icol < out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < out.GetNumRows(); irow++)
            {
                size_t imap = irow / spatialSize;
                ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
                out(irow, icol) = scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0);
            }
        }
    }
    else
    {
        // Per-row statistics: one tuple per input row.
#pragma omp parallel for
        for (long icol = 0; icol < out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < out.GetNumRows(); irow++)
            {
                ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon);
                out(irow, icol) = scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0);
            }
        }
    }
}
// BatchNormalizationBackward: not implemented on CPU; always raises.
// The UNUSED() calls silence unused-parameter warnings on all compilers.
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<ElemType>& scale, double blendFactor,
                                                     const CPUMatrix<ElemType>& saveMean, const CPUMatrix<ElemType>& saveInvStdDev,
                                                     CPUMatrix<ElemType>& scaleGrad, CPUMatrix<ElemType>& biasGrad) const
{
    // BUGFIX: the original used a comma operator after UNUSED(blendFactor) instead
    // of a statement separator; harmless but inconsistent - use ';' throughout.
    UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor); UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad);
    RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// MultiplyAndWeightedAdd: c = alpha * op(a) * op(b) + beta * c for col-major
// matrices, dispatching to cblas_dgemm/cblas_sgemm (or a quantized multiplier).
// When beta == 0, c is (re)sized to the product shape; otherwise its size is
// verified so existing contents can be blended in.
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                                 ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
    if (a.IsEmpty() || b.IsEmpty())
        return;
    // m x k = op(a), l x n = op(b); the leading dimension is always the stored
    // (untransposed) row count because the data itself is never reordered.
    int m, n, k, l;
    int lda, ldb, ldc;
    CBLAS_TRANSPOSE mklTransA;
    CBLAS_TRANSPOSE mklTransB;
    if (transposeA)
    {
        m = (int) a.GetNumCols();
        k = (int) a.GetNumRows();
        lda = k;
        mklTransA = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        m = (int) a.GetNumRows();
        k = (int) a.GetNumCols();
        lda = m;
        mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
    }
    if (transposeB)
    {
        l = (int) b.GetNumCols();
        n = (int) b.GetNumRows();
        ldb = n;
        mklTransB = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        l = (int) b.GetNumRows();
        n = (int) b.GetNumCols();
        ldb = l;
        mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
    }
    assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
    if (k != l)
        InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");
    if (beta == 0)
        c.RequireSize(m, n);
    else
        c.VerifySize(m, n); // Can't resize if beta != 0
    ldc = (int) c.GetNumRows();
    if (pQuantizedMultiplier == nullptr)
    {
        // ElemType is either float or double; pick the matching BLAS entry point.
        if (sizeof(ElemType) == sizeof(double))
        {
            cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
        }
        else
        {
#pragma warning(suppress : 4244)
            cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
        }
    }
    else
    {
        // TODO: support transpose product
        if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
            LogicError("Quantized multiplier currently doesn't support transpose.");
        // NOTE(review): the quantized path ignores alpha and beta - presumably the
        // multiplier handles scaling internally; confirm with QuantizedMultiplier.
        pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
    }
}
// Multiply1x1AndWeightedAdd: c = (alpha * a(0,0)) * b + beta * c, where a is a
// 1x1 matrix holding a scalar factor.
template <class ElemType>
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
                                                    ElemType beta, CPUMatrix<ElemType>& c)
{
    if (a.GetNumElements() != 1)
        InvalidArgument("the argument a must be a scalar"); // a is a scalar
    ElemType f = alpha * a.Get00Element();
    // Two code paths so the beta == 0 case never reads c (it could contain NaNs).
    if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * f;
    else
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * f + c(i, j) * beta;
}
// ColumnwiseScaleAndWeightedAdd: c(i,j) = alpha * a(i,j) * v[j] + beta * c(i,j),
// i.e. each column j of a is scaled by the j-th entry of vector v.
// NOTE(review): v is indexed flat as vd[j], which assumes v has a.GetNumCols()
// contiguous elements - confirm callers pass a vector of that length.
template <class ElemType>
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
    if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
        InvalidArgument("the argument v must be a vector"); // v is a vector
    if (beta == 0)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());
    else
        c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0
    const ElemType* vd = v.Data();
    // Two code paths so the beta == 0 case never reads c (it could contain NaNs).
    if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * vd[j];
    else
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta;
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
// SVD: full singular value decomposition A = U * SIGMA * VT via LAPACK gesvd.
// Under USE_MKL the classic two-call workspace-query protocol is used (lwork = -1
// returns the optimal workspace size in wkopt, W is resized to hold it); otherwise
// LAPACKE manages the workspace and only a 'superb' buffer is needed.
// NOTE: gesvd overwrites its input; A's contents are destroyed by this call.
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
    if (A.IsEmpty())
        LogicError("SVD: input matrix is empty.");
    int info;
    int m, n, lda, ldu, ldvt;
    m = (int) A.GetNumRows();
    n = (int) A.GetNumCols();
    W.GetNumRows(); // W is used as temp working memory
    lda = m;
    ldu = m;
    ldvt = n;
    U.RequireSize(m, m);
    SIGMA.RequireSize(std::min(m, n), 1);
    VT.RequireSize(n, n);
    if (sizeof(ElemType) == sizeof(double))
    {
#ifdef USE_MKL
        // First call (lwork = -1) only queries the optimal workspace size.
        double wkopt;
        int lwork = -1;
        dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
        lwork = (int) wkopt;
        W.RequireSize(lwork, 1);
        dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, reinterpret_cast<double*>(W.Data()), &lwork, &info);
#else
        // superb holds min(m,n)-1 intermediate values for unconverged superdiagonals.
        std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
                              reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
    }
    else
    {
#ifdef USE_MKL
        float wkopt;
        int lwork = -1;
        sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
        lwork = (int) wkopt;
        W.RequireSize(lwork, 1);
        sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, reinterpret_cast<float*>(W.Data()), &lwork, &info);
#else
        std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
                              reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
    }
    // info > 0 means gesvd did not converge; info < 0 (bad argument) is not checked here.
    if (info > 0)
    {
        RuntimeError("The algorithm computing SVD failed to converge.\n");
    }
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// Accumulating matrix product: c += op(a) * op(b).
// Thin wrapper over MultiplyAndWeightedAdd with alpha = 1 and beta = 1.
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                         CPUMatrix<ElemType>& c)
{
    MultiplyAndWeightedAdd((ElemType) 1.0, a, transposeA, b, transposeB, (ElemType) 1.0, c);
}
// AssignSoftmaxSum: c(0,0) = -sum over the minibatch of softmax(instance, label),
// where the label index for each instance is read from row 0 of 'this'.
// NOTE(review): softmax is indexed (instance_id, sample) - instances as rows -
// which is transposed relative to the usual column-per-instance layout; confirm
// against the caller's softmax layout.
template <class ElemType>
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
    ElemType log_likelihood = 0.0;
    size_t batch_size = GetNumCols();
// reduction(+) makes the parallel accumulation race-free
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
    {
        // Row 0 of 'this' holds the target class index for each instance.
        int sample = (int) (*this)(0, instance_id);
        log_likelihood += softmax(instance_id, sample);
    }
    c(0, 0) = -log_likelihood;
}
// AssignNCEUnnormalizedEval: unnormalized NCE score summed over the minibatch,
// negated into c(0,0). For each instance, the sample id is recovered from row 0 of
// 'this' (stored negated - NOTE(review): confirm the sign convention with the
// writer of this matrix), then score = bias[sample] + <b[:,sample], a[:,instance]>.
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
                                                    const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    ElemType log_likelihood = 0.0;
    size_t batch_size = GetNumCols();
// reduction(+) makes the parallel accumulation race-free
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
    {
        int sample = -(int) (*this)(0, instance_id);
        ElemType score = bias(sample, 0);
        // Dot product of the embedding column with the hidden-activation column.
        for (int dim = 0; dim < b.GetNumRows(); dim++)
            score += b(dim, sample) * a(dim, instance_id);
        log_likelihood += score;
    }
    c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
// AssignNCEDerivative: NCE gradients. Rows of 'this' alternate (sample id,
// sample probability), i.e. GetNumRows()/2 samples per column (instance).
//   inputIndex == 1: gradient w.r.t. the hidden layer -> c(dim, instance)
//   inputIndex == 2: gradient w.r.t. the embedding    -> c(dim, sample)
//   inputIndex == 3: gradient w.r.t. the bias         -> c(0, sample)
// Returns *this (for chaining); c is accumulated into, not overwritten.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    if (inputIndex == 1)
    {
// Instances are independent: each thread owns one column of c.
#pragma omp parallel for
        for (int instance_id = 0; instance_id < batch_size; instance_id++)
            for (int sample_id = 0; sample_id < sample_size; sample_id++)
            {
                int sample = (int) (*this)(2 * sample_id, instance_id);
                for (int dim = 0; dim < b.GetNumRows(); dim++)
                    c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
            }
    }
    else if (inputIndex == 2)
    {
        // BUGFIX: omp_get_num_threads() outside a parallel region always returns 1,
        // so the partition never scaled with the thread count. Use
        // omp_get_max_threads() to size the sample-space partition.
        int i_blocks = omp_get_max_threads() * 16;
        // Assume only one block in k direction.
        // We don't need to explicitly block in the j direction.
        // Each thread owns the samples with (sample % i_blocks == ib), so no two
        // threads ever update the same column of c concurrently.
#pragma omp parallel for
        for (int ib = 0; ib < i_blocks; ib++)
            for (int instance_id = 0; instance_id < batch_size; instance_id++)
                for (int sample_id = 0; sample_id < sample_size; sample_id++)
                {
                    int sample = (int) (*this)(2 * sample_id, instance_id);
                    if (sample % i_blocks == ib)
                        for (int dim = 0; dim < b.GetNumRows(); dim++)
                            c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
                }
    }
    else if (inputIndex == 3)
    {
        // Bias gradient stays serial: different instances may hit the same sample,
        // so a naive parallel-for would race on c(0, sample).
        for (int instance_id = 0; instance_id < batch_size; instance_id++)
            for (int sample_id = 0; sample_id < sample_size; sample_id++)
            {
                int sample = (int) (*this)(2 * sample_id, instance_id);
                c(0, sample) -= tmp(sample_id, instance_id);
            }
    }
    else
        InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
                                                           const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    // NCE objective: for each (instance, sample) pair, compare the model score
    // against the noise score log(k * P_noise(sample)) and accumulate the log
    // posterior; tmp receives the per-pair gradient factor.
    // Rows of 'this' alternate (sample id, sample probability); sample_id == 0 is
    // the true target, the remaining sample_size - 1 entries are noise samples.
    double log_likelihood = 0.0;
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    size_t num_noise_samples = sample_size - 1;
    double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
        for (int sample_id = 0; sample_id < sample_size; sample_id++)
        {
            int sample = (int) (*this)(2 * sample_id, instance_id);
            // Model score: bias + <hidden, embedding>.
            double score = bias(0, sample);
            for (int dim = 0; dim < b.GetNumRows(); dim++)
                score += a(dim, instance_id) * b(dim, sample);
            // Stored log-probabilities are negated; the true target (sample_id == 0)
            // flips the sign back - NOTE(review): confirm this sign convention with
            // the code that fills 'this'.
            double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
            if (sample_id == 0)
                sample_prob = -sample_prob;
            double score_noise = log_num_noise_samples + sample_prob;
            // Log-domain softmax over {model, noise}: z = log(e^score + e^score_noise).
            double z = LogAdd(score, score_noise);
            double logprob = score - z;
            double logprob_noise = score_noise - z;
            // Gradient factor: -P(model | pair); +1 for the true target.
            tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
            if (sample_id == 0)
                tmp(sample_id, instance_id) += 1;
            log_likelihood += sample_id == 0 ? logprob : logprob_noise;
        }
    c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// Matrix product with optional transposes: c = op(a) * op(b).
// Delegates to MultiplyAndWeightedAdd with alpha = 1 and beta = 0 (c overwritten).
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                   CPUMatrix<ElemType>& c)
{
    MultiplyAndWeightedAdd((ElemType) 1.0, a, transposeA, b, transposeB, (ElemType) 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// Plain matrix product: c = a * b (no transposes, c overwritten).
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    MultiplyAndWeightedAdd((ElemType) 1.0, a, false, b, false, (ElemType) 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add it to all elements of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// ScaleAndAdd: c += alpha * a, with broadcasting:
//   - a same shape as c : flat axpy over all elements
//   - a is 1x1          : the scalar is added to every element of c
//   - a is a column     : added to every column of c
//   - a is a row        : added to every row of c (stride-m axpy)
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty() || c.IsEmpty())
        LogicError("ScaleAndAdd: one of the input matrices is empty.");
    if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
    {
        // Full-size case: treat both buffers as flat vectors and run one axpy.
        const int m = (int) a.GetNumRows();
        const int n = (int) a.GetNumCols();
        const int len = m * n;
        const int incx = 1;
        const int incy = 1;
        assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
        if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
            InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");
        if (sizeof(ElemType) == sizeof(double))
        {
            cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
        }
        else
        {
#pragma warning(suppress : 4244)
            cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
        }
    }
    else if (a.GetNumElements() == 1) // scalar, add to all elements
    {
        ElemType v = alpha * a(0, 0);
        long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
        for (long j = 0; j < n; j++)
        {
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                c(i, j) += v;
                c(i + 1, j) += v;
                c(i + 2, j) += v;
                c(i + 3, j) += v;
            }
            // handle remaining stuffs
            for (long i = m & ~3; i < m; i++)
            {
                c(i, j) += v;
            }
        }
    }
    else if (a.GetNumCols() == 1) // col vector, add it to all columns
    {
        int m = (int) c.GetNumRows();
        if (m != (int) a.GetNumRows())
            InvalidArgument("To add column vector, rows should match.");
        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
// columns are disjoint memory regions, so the per-column axpys are race-free
#pragma omp parallel for
            foreach_column (j, c)
            {
                cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
    }
    else // row vector, add it to all rows
    {
        int m = (int) c.GetNumRows();
        int n = (int) c.GetNumCols();
        if (n != (int) a.GetNumCols())
            InvalidArgument("To add row vector, cols should match.");
        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
// stride m walks along row i of the col-major buffer
#pragma omp parallel for
            foreach_row (i, c)
            {
                cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
            }
        }
    }
}
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c += alpha * (a - b); a, b and c must all share the same dimensions.
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    const bool shapesMatch = a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
                             a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols();
    if (!shapesMatch)
    {
        InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
    }
    if (a.IsEmpty())
        LogicError("AddScaledDifference: Input matrix a is empty.");

    const ElemType* pa = a.Data();
    const ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    const long total = (long) c.GetNumElements();

    // Elements are independent, so a flat parallel loop over the raw buffers
    // suffices; the compiler can vectorize/unroll as it sees fit.
#pragma omp parallel for
    for (long i = 0; i < total; i++)
    {
        pc[i] += alpha * (pa[i] - pb[i]);
    }
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = alpha * (a - b); a and b must match in shape. c is resized unless it aliases
// one of the inputs (in which case it already has the right shape).
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
    {
        InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
    }
    if (a.IsEmpty())
        LogicError("AssignScaledDifference: Input matrix a is empty.");

    if (&c != &a && &c != &b)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());

    const ElemType* pa = a.Data();
    const ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    const long total = (long) c.GetNumElements();

    // Flat element-wise assignment over the raw buffers; iterations independent.
#pragma omp parallel for
    for (long i = 0; i < total; i++)
    {
        pc[i] = alpha * (pa[i] - pb[i]);
    }
}
// c[ci,cj] = beta * c[ci,cj] + a[ai,aj]
// Single-element update: c(ci, cj) = beta * c(ci, cj) + a(ai, aj).
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
    const bool outOfRange = ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
                            ci >= c.GetNumRows() || cj >= c.GetNumCols();
    if (outOfRange)
        InvalidArgument("AddElementToElement: index out of range.");

    // When beta is 0, skip the multiply entirely: c(ci, cj) might hold a NaN and
    // 0 * NaN would not cancel to 0.
    ElemType accum = (beta != 0) ? beta * c(ci, cj) : (ElemType) 0;
    c(ci, cj) = accum + a(ai, aj);
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c += alpha(0,0) * (a - b); the scalar factor is carried in a 1x1 matrix.
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (alpha.GetNumElements() != 1)
        InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");

    const ElemType scalar = alpha(0, 0);
    AddScaledDifference(scalar, a, b, c);
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = alpha(0,0) * (a - b); the scalar factor is carried in a 1x1 matrix.
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (alpha.GetNumElements() != 1)
        // BUGFIX: the message previously named AddScaledDifference (copy-paste).
        InvalidArgument("AssignScaledDifference: alpha must be a 1X1 matrix.");
    AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = alpha * a (element-wise); c is resized to match a.
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");

    const int numRows = (int) a.GetNumRows();
    const int numCols = (int) a.GetNumCols();
    assert(numRows > 0 && numCols > 0); // converting from size_t to int may cause overflow
    c.RequireSize(numRows, numCols);

    const ElemType* src = a.Data();
    ElemType* dst = c.Data();
    const long count = (long) c.GetNumElements();

    // alpha == 0: the result is all zeros regardless of a, so just clear the buffer.
    if (alpha == 0)
    {
        memset(dst, 0, sizeof(ElemType) * c.GetNumElements());
        return;
    }

    // Elements are independent; flat parallel loop over the raw buffers.
#pragma omp parallel for
    for (long i = 0; i < count; i++)
    {
        dst[i] = alpha * src[i];
    }
}
/// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
// In-place scaling: a = alpha * a, via cblas_dscal/cblas_sscal.
// alpha == 0 is special-cased with memset so NaNs/Infs in a cannot survive
// (0 * NaN would be NaN under scal).
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int len = m * n;
    const int incx = 1;
    assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
    // incx is a compile-time constant 1 here; the check mirrors the general
    // precondition that memset only works for contiguous (stride-1) data.
    if (alpha == 0 && incx == 1)
    {
        memset(a.Data(), 0, sizeof(ElemType) * len);
    }
    else if (sizeof(ElemType) == sizeof(double))
    {
        cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx);
    }
    else
    {
#pragma warning(suppress : 4244)
        cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx);
    }
}
/// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary>
/// <param name="alpha">1x1 matrix</param>
/// <param name="a">Input matrix</param>
// In-place scaling by a 1x1 matrix: a = alpha(0,0) * a.
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    if (alpha.GetNumElements() != 1)
        LogicError("Matrix alpha must be 1x1");

    Scale(alpha(0, 0), a);
}
// InnerProduct: per-column (isColWise) or per-row dot products of a and b, which
// must have identical shape. Result c is 1 x n (col-wise) or m x 1 (row-wise).
// Degenerate case (single row col-wise / single column row-wise) reduces to an
// element-wise product.
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    // NOTE(review): the condition relies on && binding tighter than ||; it means
    // (isColWise && m == 1) || (!isColWise && n == 1) - parentheses would be clearer.
    if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
    {
        c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
// one dot product per column; columns are independent
#pragma omp parallel for
            foreach_column (j, c)
            {
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
    }
    else
    {
        c.RequireSize(m, 1);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
// stride m walks along row i of the col-major buffer
#pragma omp parallel for
            foreach_row (i, c)
            {
                c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
    }
}
// treat matrices as vectors. do vec(a)^T vec(b)
// InnerProductOfMatrices: Frobenius inner product vec(a)^T vec(b) — a single dot
// product over both buffers treated as flat vectors. a and b must match in shape.
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProductOfMatrices: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");
    // ElemType is either float or double; pick the matching BLAS dot routine.
    if (sizeof(ElemType) == sizeof(double))
    {
        return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
    }
    else
    {
#pragma warning(suppress : 4244)
        return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
    }
}
// ElementWisePower: c(i, j) = a(i, j) ^ alpha, with fast multiply paths for the
// common exponents 2 and 3 (pow() is much slower than two multiplies).
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty())
        // BUGFIX: the message previously said "Scale:" (copy-paste from Scale()).
        LogicError("ElementWisePower: The input matrix a is empty.");

    c.RequireSize(a.GetNumRows(), a.GetNumCols());

    if (alpha == 2)
    {
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            // Read once, square with a multiply instead of pow().
            const ElemType v = a(i, j);
            c(i, j) = v * v;
        }
    }
    else if (alpha == 3)
    {
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            const ElemType v = a(i, j);
            c(i, j) = v * v * v;
        }
    }
    else
    {
        // General exponent: fall back to pow().
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = pow(a(i, j), alpha);
        }
    }
}
// AreEqual: true iff a and b have identical shape and every element pair differs
// by at most 'threshold' in absolute value.
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        return false;

    bool result = true;
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        // BUGFIX: the old code used 'break' here, which only left the innermost
        // loop expanded from foreach_coord (and breaking out of an OpenMP
        // worksharing loop is non-conforming anyway). Instead, flush the shared
        // flag and skip remaining comparisons once a mismatch is found - the same
        // early-out pattern HasElement uses.
#pragma omp flush(result)
        if (result && abs(a(i, j) - b(i, j)) > threshold)
        {
            result = false;
        }
    }
    return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
// TensorShuffleScaleAndAdd: c = keepWeight * b + scaleFactor * shuffle(a), where
// the shuffle swaps the S and K axes of a (D x S x M x K x T) tensor stored as a
// flat column-major buffer. See Matrix<ElemType>::TensorShuffleScaleAndAdd().
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    size_t N = D * S * M * K * T;
    const auto pa = a.Data();
    const auto pb = b.Data();
    auto pc = c.Data();
    // Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
    for (size_t na = 0; na < N; na++) // loop over all elements
    {
        // recover the 5 indices from the loop counter
        size_t d = na % D;
        size_t s = (na / D) % S;
        size_t m = (na / D / S) % M;
        size_t k = (na / D / S / M) % K;
        size_t t = (na / D / S / M / K) % T;
        // compute index for the a and b/c tensors
        assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
        size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
        assert(nb < N);
        // perform the computation
        ElemType cval = keepWeight ? keepWeight * pb[nb] : 0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
        cval += scaleFactor * pa[na];
        pc[nb] = cval;
    }
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
    // Factory: build a rows x cols matrix with every element set to 1.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetValue(1);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
    // Factory: build a rows x cols matrix with every element set to 0.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetValue(0);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
    // Factory: build a rows x rows identity matrix (ones on the diagonal, zeros elsewhere).
    CPUMatrix<ElemType> result(rows, rows);
    result.SetDiagonalValue(1);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
    // Factory: rows x cols matrix filled with uniform random values in [low, high), seeded by 'seed'.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetUniformRandomValue(low, high, seed);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
    // Factory: rows x cols matrix filled with Gaussian random values N(mean, sigma), seeded by 'seed'.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetGaussianRandomValue(mean, sigma, seed);
    return result;
}
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
    // Returns true if any element of 'mat' equals v.
    // Matching rules: finite values compare with exact ==; NaN matches NaN;
    // infinities must also match in sign.
    bool bHas = false;
    bool isvFinite = std::isfinite(v);
#pragma omp parallel for
    for (long j = 0; j < mat.GetNumElements(); j++)
    {
#pragma omp flush(bHas)
        // Once any thread finds a match, remaining iterations become cheap no-ops.
        if (!bHas)
        {
            ElemType cur = mat.Data()[j];
            if (isvFinite && std::isfinite(cur))
            {
                if (cur == v)
                    bHas = true;
            }
            else if (std::isnan(v) && std::isnan(cur))
                bHas = true;
            else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
                bHas = true;
        }
    }
    return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two row vectors
// the output is a matrix of size(neg+1, col)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
    // Row 0 holds the plain element-wise product a .* b.
    // Row i (1 <= i <= negnumber) holds a(0,j) * b(0, (j + shift + i - 1) % n),
    // i.e. products against circularly shifted copies of b.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
    auto& us = *this;
    if (this != &a)
    {
        RequireSize(negnumber + 1, a.GetNumCols());
        // RequireSize(a.GetNumRows(), a.GetNumCols());
    }
    // NOTE(review): if this == &a, the writes below overwrite a's storage that the second loop
    // still reads — confirm whether in-place use is actually supported by callers.
    long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n)
    // #pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(0, j) * b(0, j);
    }
    for (long j = 0; j < n; j++)
    {
        for (long i = 1; i < m; i++)
        {
            us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n);
        }
    }
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
    // Column-wise inner products of a and b, plus 'negnumber' extra output rows of inner
    // products against circularly shifted columns of b (column (j + shift + i - 1) % n for
    // output row i). Output c is (negnumber + 1) x n. Only isColWise == true is supported.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
    {
        InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors");
        // c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise
    {
        c.RequireSize(negnumber + 1, n); // this line ischanged
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        // Dispatch on the element type at runtime; the reinterpret_casts are no-ops for the matching type.
        if (sizeof(ElemType) == sizeof(double))
        {
            // Row 0: plain per-column dot products.
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            // Rows 1..negnumber: dot products of column j of a against shifted columns of b.
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
        else
        {
            // Same computation using the single-precision BLAS routine.
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
    }
    else
    {
        // Row-wise mode is not implemented: InvalidArgument throws here, so the code
        // below is unreachable (kept as reference for a future implementation).
        InvalidArgument("InnerProduct: Rowwise is not supported yet");
        c.RequireSize(m, 1);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
    // Copies row 'index' of a into this matrix, which is resized to a 1 x n row vector.
    if (a.IsEmpty())
        LogicError("GetARowByIndex: the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    // 'index' is unsigned, so the former 'index < 0' test was always false;
    // compare against the unsigned row count directly to avoid a sign-compare warning.
    if (index >= a.GetNumRows())
        LogicError("GetARowByIndex: the row index is out of range.");
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    auto& us = *this;
    RequireSize(1, n);
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(index, j);
    }
    return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
    // Multiplies each row of b element-wise by the row vector a with a circular column
    // shift applied to one of the operands (see the worked example above).
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != 1 || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    c.RequireSize(k, l); // c must be the same size as b
    if (bFirstmatrixfixed)
    {
        // Keep a's column order; read b's columns shifted by 'shift'.
        for (long j = 0; j < l; j++)
        {
            for (long i = 0; i < k; i++)
            {
                c(i, j) = a(0, j) * b(i, (j + shift) % l);
            }
        }
    }
    else
    {
        // Keep b's column order; read a's columns shifted by 'shift'.
        for (long j = 0; j < l; j++)
        {
            for (long i = 0; i < k; i++)
            {
                c(i, j) = a(0, (j + shift) % l) * b(i, j);
            }
        }
    }
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
    // us(0, j) = a(0, j) * b(0, (j + shift) % n) — element-wise product against a
    // circularly shifted copy of b.
    // Error messages previously named AssignElementProductOfWithShiftNeg (copy/paste);
    // corrected so diagnostics point at the right function.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShift: Matrix is empty.");
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        InvalidArgument("AssignElementProductOfWithShift: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShift: The input matrix must be a row vector.");
    auto& us = *this;
    if (this != &a)
    {
        RequireSize(1, a.GetNumCols());
        // RequireSize(a.GetNumRows(), a.GetNumCols());
    }
    // long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n)
    long n = (long) GetNumCols(); // a and b are of size (1,n)
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(0, j) * b(0, (j + shift) % n);
    }
    return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd
inline double LogAddD(double x, double y)
{
    // Thin wrapper so callers can accumulate in double precision regardless of ElemType.
    return LogAdd(x, y);
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
    // Returns log(sum_k exp(element_k)) over all elements, accumulated in double
    // precision via LogAddD, starting from LZERO (the "log of zero" floor).
    ElemType fAlpha = (ElemType) LZERO;
    ElemType* bufPtr = Data();
    // Use size_t for the loop index: GetNumElements() returns size_t, so 'int' would
    // overflow for matrices with more than INT_MAX elements and triggers sign-compare warnings.
    for (size_t k = 0; k < GetNumElements(); k++)
        fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
    return fAlpha;
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& pair_scores)
{
    // Backward (beta) pass of the RCRF: fills 'beta' column by column from the last
    // position backwards, delegating per-label work to _rcrfBackwardCompute.
    int iNumPos = (int) lbls.GetNumCols();
    int iNumLab = (int) lbls.GetNumRows();
    int lastLbl = -1;
    // Find which label is active at the last position.
    // NOTE(review): 'lastLbl' is computed but never used below — confirm whether it was
    // meant to seed the recursion or can be removed.
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, iNumPos - 1) != 0)
        {
            lastLbl = ik;
            break;
        }
    beta.RequireSize(iNumLab, iNumPos);
    // Time steps must run sequentially (beta at t depends on t+1); labels within a
    // time step are independent and are processed in parallel.
    for (int t = iNumPos - 1; t >= 0; t--)
    {
#pragma omp parallel for
        for (int k = 0; k < iNumLab; k++)
        {
            _rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
        }
    }
};
// Calculate alpha in forward-backward calculation. equation (6), (7) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the max number of phones between utterances
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
// Alpha and Beta scores outside of the delay boundary are set to zero.
// Setting this parameter smaller will result in shorter delay between label outputs during decoding.
// delayConstraint=-1 means no constraint
template<class ElemType>
void _assignAlphaScore(
    const ElemType *prob,
    ElemType *alphaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    size_t numChannels,
    const size_t uttNum,
    const size_t t,
    const size_t maxPhoneNum, // Maximum length of utterance in this MB
    const size_t totalPhoneNum, // Total number of phones
    const size_t blankTokenId,
    const int delayConstraint)
{
    // One step (time t) of the CTC forward recursion: updates alpha_t(s) for every
    // utterance and every phone-sequence position s. See the parameter comments above.
    for (size_t uttId = 0;uttId < uttNum;uttId++) {
        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        if (t >= frameNum) continue; // utterance shorter than t: nothing to do
        size_t phoneNum = uttPhoneNum[uttId];
        // Positions within one time step are independent; parallelize over them.
#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            // Index of the label in the sequence
            // Current and previous phone indices in phoneSeq matrix
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;
            // Actual current phone label
            size_t phoneId = (size_t)(phoneSeq[labelid]);
            // Index of the current frame in minibatch
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
            // Index of probability of observing phoneId at frame timeId
            size_t probId = timeId*totalPhoneNum + phoneId;
            size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)
            if (t == 0)
            {
                // Initialize recursion: paths may start at the leading blank (s==1) or first phone (s==2).
                if (phoneSeqId == 1 || phoneSeqId == 2)
                {
                    alphaScore[alphaId] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    size_t timeId_1 = timeId - numChannels;               // Index corresponding to (t-1)
                    size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
                    size_t alphaId_1 = alphaId_0 - 1;                      // alpha_{t-1}(s-1)
                    size_t alphaId_2 = alphaId_0 - 2;                      // alpha_{t-1}(s-2)
                    ElemType x = LZERO;
                    ElemType ascore;
                    // Skip transition from s-2 (equations (6)/(7) in Graves et al. 2006):
                    // allowed only if current label is not blank and differs from the previous non-blank label.
                    if (phoneSeqId > 2)
                    {
                        size_t labelid_2 = labelid - 2;
                        // if current label is not blank and not equal prev non-blank label
                        if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
                        {
                            x = LogAdd(x, alphaScore[alphaId_2]);
                        }
                    }
                    if (phoneSeqId > 1)
                    {
                        x = LogAdd(x, alphaScore[alphaId_1]);
                    }
                    x = LogAdd(x, alphaScore[alphaId_0]);
                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId]; // Probability of observing given label at given time
                    else
                        ascore = 0;
                    alphaScore[alphaId] = (ElemType)x + ascore;
                    // Optional delay constraint: zero out (set to LZERO) alpha values past the
                    // allowed boundary of the phone to limit output latency during decoding.
                    if (delayConstraint != -1)
                    {
                        size_t labelid_r = labelid + 2;
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
                        if (phoneId == blankTokenId)
                        {
                            // only constraint right side
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                alphaScore[alphaId] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                alphaScore[alphaId] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate beta in forward-backward calculation, equation (10), (11) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// See _assignAlphaScore for the explanation of parameters
template<class ElemType>
void _assignBetaScore(
    const ElemType *prob,
    ElemType *betaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const size_t numChannels,
    const size_t uttNum,
    const long t,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum,
    const size_t blankTokenId,
    const int delayConstraint)
{
    // One step (time t) of the CTC backward recursion: updates beta_t(s), the mirror
    // image of _assignAlphaScore (reads t+1 instead of t-1, s+1/s+2 instead of s-1/s-2).
    for (size_t uttId = 0;uttId < uttNum;uttId++) {
        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        if (t >= frameNum) continue; // utterance shorter than t: nothing to do
        size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;
            size_t labelid_2 = labelid + 2;                       // label two positions ahead
            size_t phoneId = (LONG64)(phoneSeq[labelid]);
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
            size_t probId = timeId*totalPhoneNum + phoneId;
            size_t betaid = maxPhoneNum* timeId + phoneSeqId;     // beta_t(s)
            size_t timeId_1 = timeId + numChannels;               // index corresponding to (t+1)
            size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId; // beta_{t+1}(s)
            size_t betaid_1 = betaid_0 + 1;                       // beta_{t+1}(s+1)
            size_t betaid_2 = betaid_0 + 2;                       // beta_{t+1}(s+2)
            if (t == frameNum - 1)
            {
                // Initialize recursion: paths may end at the trailing blank or the last phone.
                if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
                {
                    betaScore[betaid] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    ElemType x = LZERO;
                    ElemType ascore;
                    // Skip transition to s+2: allowed only if current label is not blank
                    // and differs from the next non-blank label.
                    if (phoneSeqId < phoneNum - 3)
                    {
                        if (phoneSeq[labelid] != blankTokenId && phoneId != phoneSeq[labelid_2])
                        {
                            x = LogAdd(x, betaScore[betaid_2]);
                        }
                    }
                    if (phoneSeqId < phoneNum - 2)
                    {
                        x = LogAdd(x, betaScore[betaid_1]);
                    }
                    x = LogAdd(x, betaScore[betaid_0]);
                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId];
                    else
                        ascore = 0;
                    betaScore[betaid] = (ElemType)x + ascore;
                    // Optional delay constraint, mirroring _assignAlphaScore.
                    if (delayConstraint != -1)
                    {
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
                        if (phoneId == blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                betaScore[betaid] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                betaScore[betaid] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate CTC score. equation (8) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
    std::vector<ElemType>& totalScore,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const size_t numChannels,
    const size_t maxPhoneNum)
{
    // For each utterance, combine beta at the first frame over the two possible start
    // positions (leading blank / first phone) into the total path score, store it back
    // into betaScore[alphaId_0] and into totalScore[uttId]. Utterances are independent.
#pragma omp parallel for
    for (int uttId = 0; uttId < (int) uttNum; uttId++) {
        // (former 'if (uttId < uttNum)' re-check removed: the loop condition already
        // guarantees it; the cast avoids a signed/unsigned comparison warning)
        LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
        betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
        totalScore[uttId] = betaScore[alphaId_0];
    }
}
// Calculate derivative, equation (15) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// See _assignAlphaScore for the explanation of parameters
template<class ElemType>
void _assignCTCScore(
    ElemType *CTCscore,
    ElemType *prob,
    ElemType *alphaScore,
    ElemType *betaScore,
    ElemType *phoneSeq,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const std::vector<size_t>& uttFrameNum,
    const size_t numChannels,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum)
{
    // Computes the per-frame label occupancy gamma from alpha, beta and the total path
    // score, accumulating in log space and converting to linear probabilities at the end.
    for (size_t uttId = 0;uttId < uttNum;uttId++) {
        // Frames of one utterance are independent; parallelize over time.
#pragma omp parallel for
        for (int t = 0; t < uttFrameNum[uttId]; t++) {
            size_t phoneNum = uttPhoneNum[uttId];
            size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
            ElemType P_lx = betaScore[alphaId_0]; // log of the total path probability (eq. (8))
            // Accumulate log-occupancy of every phone-sequence position into its phone's slot.
            for (int s = 1; s < phoneNum - 1; s++)
            {
                long phoneId = phoneSeq[uttId*maxPhoneNum + s];
                size_t alphaId = maxPhoneNum* timeId + s;
                size_t probId = timeId*totalPhoneNum + phoneId;
                // NOTE(review): phoneId is 'long' but compared against SIZE_MAX — on LP64 this
                // compares -1 correctly after conversion, but confirm the sentinel convention.
                if (phoneId != SIZE_MAX)
                {
                    ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
                    CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
                }
            }
            // Convert the frame's accumulated log-occupancies to linear probabilities,
            // flushing values below the LZERO floor to 0.
            for (int s = 0; s < totalPhoneNum; s++)
            {
                size_t probId = timeId*totalPhoneNum + s;
                ElemType logoccu = CTCscore[probId];
                if (logoccu < LZERO)
                    CTCscore[probId] = 0.0f;
                else
                    CTCscore[probId] = exp(logoccu);
            }
        }
    }
}
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
    const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
    const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, ElemType &totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
    const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
    // Driver for the CTC forward-backward computation: runs the alpha pass forward in
    // time, the beta pass backward, then derives the per-frame CTC scores (this matrix)
    // and accumulates the total score over all utterances into 'totalScore'.
    // Column wise representation of sequences in input matrices (each column is one sequence/utterance)
    if (isColWise)
    {
        // Total number of phones
        size_t totalPhoneNum = prob.GetNumRows();
        size_t uttNum = uttFrameNum.size();
        // Max number of phones in utterances in this minibatch
        size_t maxPhoneNum = phoneSeq.GetNumRows();
        // Forward pass: alpha depends on the previous time step, so t runs sequentially.
        for (size_t t = 0; t < maxFrameNum; t++)
        {
            _assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }
        // Backward pass: signed loop counter since t counts down past 0.
        for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
        {
            _assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }
        std::vector<ElemType> scores(uttNum);
        _assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);
        _assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
            uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);
        for (size_t utt = 0; utt < uttNum; utt++)
        {
            totalScore += scores[utt];
        }
        return *this;
    }
    else {
        LogicError("Only ColWise minibatch layout is supported.");
    }
    // Unreachable: both branches above return or throw; kept to satisfy the compiler's
    // return-path analysis.
    return *this;
}
/// the kernel function for RCRF backward computation
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
                                               CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores)
{
    // Computes beta(k, t) for one label k at one position t.
    // All accumulation is done in log space via LogAddD (double precision).
    size_t iNumLab = alpha.GetNumRows();
    size_t iNumPos = alpha.GetNumCols();
    ElemType fSum;
    ElemType fTmp = (ElemType) LZERO;
    if (t == iNumPos - 1)
    {
        // Last position: beta is alpha normalized by the log-sum over all labels.
        fSum = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            fSum = (ElemType) LogAddD(fSum, alpha(j, t));
        }
        fTmp = alpha(k, t) - fSum;
        beta(k, t) = fTmp;
    }
    else
    {
        // Interior position: accumulate over successor labels j, each normalized by the
        // log-sum of alpha(m, t) + pair_scores(j, m) over all labels m.
        for (int j = 0; j < iNumLab; j++)
        {
            fSum = (ElemType) LZERO;
            for (int m = 0; m < iNumLab; m++)
            {
                fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
            }
            fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
        }
        beta(k, t) = fTmp;
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& alpha,
                                              const CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& pair_scores,
                                              CPUMatrix<ElemType>& grd)
{
    // Accumulates the transition-score gradient 'grd' over all positions: the expected
    // transition counts (via _rcrfTransGrdCompute) minus 1 for each observed transition.
    int iNumPos = (int) alpha.GetNumCols();
    int iNumLab = (int) alpha.GetNumRows();
    int firstLbl = -1;
    // Label active at the first position (start-of-sequence transition source).
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }
    for (size_t tPos = 0; tPos < iNumPos; tPos++)
    {
        // NOTE(review): 'b' and 'a' are computed here but not used in this function
        // (_rcrfTransGrdCompute slices them again internally) — confirm they can be removed.
        CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
        CPUMatrix<ElemType> a;
        if (tPos > 0)
            a = alpha.ColumnSlice(tPos - 1, 1);
#pragma omp parallel for
        for (int i = 0; i < iNumLab; i++)
        {
            _rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
        }
        // transition score: subtract 1 for the observed transition (i -> j) at this position.
        int i = -1;
        if (tPos == 0)
            i = firstLbl;
        else
        {
            for (int ik = 0; ik < lbls.GetNumRows(); ik++)
                if (lbls(ik, tPos - 1) != 0)
                {
                    i = ik;
                    break;
                }
        }
        int j = -1;
        for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        {
            if (lbls(ik, tPos) != 0)
            {
                j = ik;
                break;
            }
        }
        // NOTE(review): if no label is set in a column, i or j stays -1 and grd(j, i)
        // indexes out of range — presumably lbls is guaranteed one-hot; verify with callers.
        grd(j, i) -= 1.0;
    }
};
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
                                               const CPUMatrix<ElemType>& lbls,
                                               const CPUMatrix<ElemType>& alpha,
                                               const CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores,
                                               CPUMatrix<ElemType>& grd,
                                               const size_t tPos // position
                                               )
{
    // Adds the expected count of transition (i -> j) at position tPos to grd(j, i) for
    // every destination label j. Log-space accumulation, exponentiated at the end.
    int iNumLab = (int) alpha.GetNumRows();
    int firstLbl = -1;
    // Label active at the first position; at tPos == 0 only that source label has mass.
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }
    CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
    CPUMatrix<ElemType> a;
    if (tPos > 0)
        a = alpha.ColumnSlice(tPos - 1, 1);
    {
        ElemType fTmp = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            // Source mass: at tPos == 0 it is 0 (log 1) for the first label and LZERO
            // (log 0) otherwise; at later positions it is alpha(i, tPos - 1).
            if (tPos == 0)
            {
                if (i == firstLbl)
                {
                    fTmp = 0;
                }
                else
                {
                    fTmp = (ElemType) LZERO;
                }
            }
            else
            {
                fTmp = a(i, 0);
            }
            fTmp += pair_scores(j, i);
            // Normalizer: log-sum over all source labels k of mass(k) + pair_scores(j, k).
            ElemType fSum = (ElemType) LZERO;
            for (int k = 0; k < iNumLab; k++)
            {
                ElemType fTmp2;
                if (tPos == 0)
                {
                    if (k == firstLbl)
                    {
                        fTmp2 = 0;
                    }
                    else
                    {
                        fTmp2 = (ElemType) LZERO;
                    }
                }
                else
                {
                    fTmp2 = a(k, 0);
                }
                fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
            }
            fTmp -= fSum;
            fTmp += b(j, 0); // weight by the backward score of the destination label
            grd(j, i) += exp(fTmp);
        }
    }
};
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold)
{
    // Zeroes out columns (frames) of this matrix based on label/gamma.
    auto& us = *this;
    if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
        LogicError("DropFrame: target matrix is not in the same size as gamm matrix.");
#pragma omp parallel for
    foreach_column (j, label)
    {
        // Determine whether this frame should be dropped: the label's active row (value
        // close to 1) has a gamma below the threshold.
        bool dropframe = false;
        foreach_row (i, label)
        {
            if (fabs(label(i, j) - 1.0f) < 0.1)
            {
                if (gamma(i, j) < threshhold)
                    dropframe = true;
                break;
            }
        }
        // NOTE(review): 'dropframe' is computed but never consulted — the loop below zeroes
        // EVERY column unconditionally. This looks like a missing 'if (dropframe)' guard;
        // left unchanged here because altering it changes observable behavior — confirm intent.
        foreach_row (i, label)
        {
            us(i, j) = 0.0f;
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
                                                              const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
    // Accumulates the sequence-training error signal into this matrix:
    // us += alpha * (label - (1 - hsmoothingWeight) * dnnoutput - hsmoothingWeight * gamma).
    auto& us = *this;
    foreach_coord (i, j, us)
        us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
    return *this;
}
// note: this function does not depend on the <ElemType> parameter
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
    // Sets the OpenMP (and BLAS backend) thread count and returns the value in effect.
    // numThreads == 0 means "leave the default"; negative values mean "hardware
    // concurrency minus |numThreads|" (clamped to at least 1).
    if (numThreads == 0) // use default
        return numThreads;
    int mthreads = (int) std::thread::hardware_concurrency();
    if (numThreads <= 0)
        numThreads = std::max(1, mthreads + numThreads);
    // Never ask for more threads than the hardware reports.
    if (numThreads > mthreads)
        numThreads = mthreads;
#ifdef _OPENMP
    omp_set_num_threads(numThreads);
    numThreads = omp_get_max_threads(); // read back what OpenMP actually granted
#ifdef USE_MKL
    mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
    openblas_set_num_threads(numThreads);
#endif
#endif
    return numThreads;
}
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
    // Returns the OpenMP thread limit when OpenMP is enabled, otherwise the hardware
    // concurrency reported by the standard library.
    int numThreads = (int)std::thread::hardware_concurrency();
#ifdef _OPENMP
    numThreads = omp_get_max_threads();
#endif
    return numThreads;
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
    // No-op unless built against MKL; throws if MKL rejects the CBWR setting.
#ifdef USE_MKL
    if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
        RuntimeError("Could not set MKL compatible mode.");
#endif
}
// =======================================================================
// TensorView support
// =======================================================================
// To save time, this makes extensive use of templates and macros.
// -----------------------------------------------------------------------
// function to compute the value for a given output location (perform reduction if needed)
// -----------------------------------------------------------------------
// perform loop over reduction index m
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m>
struct TensorOpReduction
{
    // reduction case (non-reduction case is specialized)
    // Recurses over reduction dimension m (m decreases to -1, which applies opfn),
    // folding each deeper result into 'aggregate' with reductionOp.
    static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
                                const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
    {
        array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
        for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
            strides[i] = reducingStrides[i][(size_t) m];
        // First element of this dimension seeds the aggregate; the loop below handles the rest.
        double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
        for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
        {
            // advance the pointers
            for (size_t i = 0; i < N - 1; i++)
                pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
            // need to descend into one loop deeper
            aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides));
        }
        // Actually it would be nicer to return double but we keep ElementType so that test don't return different numbers than previous implementation.
        return static_cast<double>(aggregate);
    }
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1>
{
    // Base case: no reduction dimensions left — apply the element-wise op to the
    // current pointer positions.
    static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
                                const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&)
    {
        return opfn(pointers); // finally we are doing some work!!!
    }
};
// perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices.
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, size_t N, int m>
struct TensorArgOpReduction
{
    // Entry point: dispatches on the number of reduction dimensions (up to 3) and
    // returns the winning value together with its flat index within the reduction.
    static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
                                                        ElementWiseOperator reductionOp)
    {
        size_t counter = 0; // counts elements visited, used to derive the argmin/argmax index
        size_t index = 0;
        ElemType val = (ElemType)0;
        switch (reducingOpDims.size())
        {
        case 3:
            val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 2:
            val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 1:
            val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 0:
            val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        default:
            LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size());
        }
        return make_pair(val, index);
    }
    // reduction case (non-reduction case is specialized)
    // Recurses over reduction dimension m, keeping the best value seen so far for
    // opArgmin/opArgmax and recording the index of the winner.
    static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
                                ElementWiseOperator reductionOp, size_t& counter, size_t& index)
    {
        array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
        for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
            strides[i] = reducingStrides[i][(size_t)m];
        ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
        for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
        {
            // advance the pointers
            for (size_t i = 0; i < N - 1; i++)
                pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
            ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            // For any other operator 'update' stays false and the aggregate is kept.
            bool update = false;
            switch (reductionOp)
            {
            case ElementWiseOperator::opArgmin:
                update = (aggregate > val);
                break;
            case ElementWiseOperator::opArgmax:
                update = (aggregate < val);
                break;
            }
            if (update)
            {
                aggregate = val;
                index = counter - 1; // counter was already incremented for this element in the base case
            }
        }
        return aggregate;
    }
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, size_t N>
struct TensorArgOpReduction<ElemType, N, -1>
{
    // Base case: reads the current element of the first input and counts it, so the
    // caller can recover the flat index of the argmin/argmax winner.
    static inline ElemType Loop(array<ElemType*, N> pointers,
                                const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator reductionOp, size_t& counter, size_t& index)
    {
        counter++;
        return *pointers[0]; // finally we are doing some work!!!
    }
};
// -----------------------------------------------------------------------
// perform loop over regular index k for N-nary operations (N counting the output)
// -----------------------------------------------------------------------
// perform loop over regular index k and reducing index m for N operands (counting the output)
// General case: peel off output dimension k, looping over its extent and
// descending into TensorOpIteration<..., k - 1> for each slice.
// Template parameters: N = operand count (incl. output), m = remaining
// reduction dims - 1, k = remaining regular (output) dims - 1.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k>
struct TensorOpIteration
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// non-scalar case: still nested result loops left
// cache dimension k's per-operand strides so the inner loop is pure pointer adds
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t) k];
for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE.
// This is a very common case, e.g. adding vectors or computing the Sigmoid.
// The element index k is parallelized with OpenMP; each iteration delegates to
// the scalar (k = -1) specialization with per-element pointers.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
ElemType* pc = pointers[2];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
// Each branch passes literal constants (beta = 0 and/or alpha = 1) so the
// scalar kernel can fold the multiply/accumulate away at compile time.
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default).
// TODO: The signedness of k (required for omp) causes an extra sign-extend.
// TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it?
}
};
// and unary
// Same stride-1, no-reduction fast path as the 3-operand case above, but for
// unary ops (input a, output b). See the TODOs on the 3-operand version.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
// Literal beta = 0 / alpha = 1 in the branches below enable constant folding
// inside the scalar (k = -1) kernel.
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
};
// Terminal case k = -1 of the regular-index recursion: every output dimension
// has been peeled off, so compute a single output element. Any remaining
// reduction dimensions are handled by TensorOpReduction. The result is scaled
// by alpha and, when beta != 0, blended with the existing output value.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1>
{
    static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
                            const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
                            const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
    {
        // evaluate the elementwise op (with reduction over any remaining dims)
        ElemType result = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
        // apply the output scale
        result *= alpha;
        // the last pointer addresses the output element
        ElemType* pout = pointers.back();
        if (beta != 0)
            result += beta * *pout; // blend with the previous target value
        *pout = result;
    }
};
// perform loop over regular index k and reducing index m for N operands (counting the output), the difference
// between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of
// the result. The reason that they aren't combined is because of performance.
// General case: peel off output dimension k, recursing into k - 1 per slice.
template <class ElemType, size_t N, int k>
struct TensorArgOpIteration
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// non-scalar case: still nested result loops left
// cache dimension k's strides so the inner loop is pure pointer adds
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t)k];
for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Terminal case k = -1: compute one output element. ReduceAll performs the
// argmin/argmax reduction and returns a (value, index) pair; only the index
// (val.second) is written to the output, cast to ElemType.
template <class ElemType, size_t N>
struct TensorArgOpIteration<ElemType, N, -1>
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// we are at element level for the result: perform the op (there may still be reduction)
// NOTE(review): the reduction-rank template argument is hard-coded to 2 here,
// i.e. up to 3 flattened reduction dimensions -- ReduceAll is expected to
// dispatch on reducingOpDims.size() internally; confirm against its definition.
auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp);
auto* pout = pointers.back();
*pout = (ElemType)val.second;
return;
}
};
// -----------------------------------------------------------------------
// map runtime parameters N to template parameters
// -----------------------------------------------------------------------
// tensor operation with k+1 dimensions (-1 means scalar)
// Maps the runtime count of reduction dimensions (0..2) to the compile-time
// template parameter m of TensorOpIteration, and selects the vectorizable
// fast path when no reduction is needed and all leading strides are 1.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k>
static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
size_t dims = reducingOpDims.size();
switch (dims)
{
case 2:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
{
// if all leading dimensions are 1, we can let the compiler do some unrolling
// (the short-circuiting 'k >= 0 &&' guards the regularStrides[i][0] access in the scalar case)
bool leadingAllOne = true;
for (size_t i = 0; i < N; i++)
leadingAllOne &= k >= 0 && regularStrides[i][0] == 1;
if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
default:
LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different k.
// Applies the per-operand element offsets, then maps the runtime count of
// regular (output) dimensions (0..4) to the compile-time parameter k.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
pointers[i] += offsets[i];
size_t dims = regularOpDims.size();
switch (dims)
{
case 4:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 3:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 2:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different reductionOps
// Maps the runtime reduction op to a reduction lambda and forwards everything
// to TensorOpWithFnAndReduction. Unknown reduction ops fail with LogicError.
template <class ElemType, typename OPFN, size_t N>
static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we
// used double for aggregator of sum. But:
// * for min and max reductions this is meaningless.
// * It is not consitent with what we do on GPU, there we aggregate on ElemType.
// * It costs performance.
// TODO: apdapt e2e tests to run with aggregator of type ElemType.
#define CaseTensorOpWithFnAndReduction(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \
{ \
return Op##oper(a, b); \
}, \
offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
switch (reductionOp)
{
CaseTensorOpWithFnAndReduction(Sum);
CaseTensorOpWithFnAndReduction(LogSum);
CaseTensorOpWithFnAndReduction(Min);
CaseTensorOpWithFnAndReduction(Max);
CaseTensorOpWithFnAndReduction(ElementwiseProduct);
default:
LogicError("Specified ElementWiseOperator op %d not suported as reduction operation.", (int)reductionOp);
}
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; also map op to a lambda
// -----------------------------------------------------------------------
// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// pointers = { input a, output this }; the supported reduction ops are
// validated up front, then the elementwise op is dispatched via macro
// expansion over ForAllUnaryOps.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum &&
reductionOp != ElementWiseOperator::opLogSum &&
reductionOp != ElementWiseOperator::opMin &&
reductionOp != ElementWiseOperator::opMax &&
reductionOp != ElementWiseOperator::opElementwiseProduct)
InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");
// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
#define CaseUnaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
{ \
return Op##oper((*(pp[0]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 2> pointers = {a.Data(), Data()};
switch (op)
{
ForAllUnaryOps(CaseUnaryTensorOp);
default:
LogicError("TensorOp: Unknown unary op code %d.", (int) op);
}
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// pointers = { a, b, output this }; only opSum reduction is supported here.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum.");
#define CaseBinaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 3> pointers = {a.Data(), b.Data(), Data()};
switch (op)
{
ForAllBinaryOps(CaseBinaryTensorOp);
default:
LogicError("TensorOp: Unknown op binary code %d.", (int) op);
}
}
// perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// pointers = { a, b, c, output this }; only opSum reduction is supported here.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum.");
#define CaseTernaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), Data()};
switch (op)
{
ForAllTernaryOps(CaseTernaryTensorOp);
default:
LogicError("TensorOp: Unknown ternary op code %d.", (int) op);
}
}
// Returns the linear index of the smallest element (ties broken toward the
// lower index), or -1 if the matrix is empty.
// Each OMP thread scans its share of elements and keeps a local winner; the
// winners are then merged under a critical section.
// Fixes vs. previous version:
// * the first element a thread sees is now taken unconditionally
//   (localMinArg == -1), so matrices whose elements all equal
//   numeric_limits<ElemType>::max() no longer return -1;
// * a thread that received no iterations (localMinArg == -1) can no longer
//   overwrite a valid result during the merge.
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
    int minArg = -1;
    ElemType minValue = std::numeric_limits<ElemType>::max();
#pragma omp parallel
    {
        int localMinArg = -1;
        ElemType localMinValue = std::numeric_limits<ElemType>::max();
#pragma omp for
        for (int index = 0; index < (int) GetNumElements(); ++index)
        {
            const ElemType v = Data()[index];
            // Strict '>' keeps the lowest index among equal minima, since each
            // thread visits its indices in ascending order.
            if (localMinArg == -1 || localMinValue > v)
            {
                localMinArg = index;
                localMinValue = v;
            }
        }
#pragma omp critical
        {
            // Merge this thread's winner; ignore threads that saw no elements.
            // Prefer the smaller value; on ties, the smaller index.
            if (localMinArg != -1 &&
                (minArg == -1 || minValue > localMinValue ||
                 (minValue == localMinValue && minArg > localMinArg)))
            {
                minArg = localMinArg;
                minValue = localMinValue;
            }
        }
    }
    return minArg;
}
// Returns the linear index of the largest element (ties broken toward the
// lower index), or -1 if the matrix is empty.
// Each OMP thread scans its share of elements and keeps a local winner; the
// winners are then merged under a critical section.
// Fixes vs. previous version:
// * the sentinel was numeric_limits<ElemType>::min(), which for floating
//   types is the smallest POSITIVE value, so matrices containing only
//   non-positive values returned -1; use lowest() and additionally take the
//   first element a thread sees unconditionally (localMaxArg == -1);
// * a thread that received no iterations (localMaxArg == -1) can no longer
//   overwrite a valid result during the merge.
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
    int maxArg = -1;
    ElemType maxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp parallel
    {
        int localMaxArg = -1;
        ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp for
        for (int index = 0; index < (int) GetNumElements(); ++index)
        {
            const ElemType v = Data()[index];
            // Strict '<' keeps the lowest index among equal maxima, since each
            // thread visits its indices in ascending order.
            if (localMaxArg == -1 || localMaxValue < v)
            {
                localMaxArg = index;
                localMaxValue = v;
            }
        }
#pragma omp critical
        {
            // Merge this thread's winner; ignore threads that saw no elements.
            // Prefer the larger value; on ties, the smaller index.
            if (localMaxArg != -1 &&
                (maxArg == -1 || maxValue < localMaxValue ||
                 (maxValue == localMaxValue && maxArg > localMaxArg)))
            {
                maxArg = localMaxArg;
                maxValue = localMaxValue;
            }
        }
    }
    return maxArg;
}
// Dispatch a whole-matrix arg-style reduction to the matching implementation.
// Only opArgmin and opArgmax are supported; anything else raises
// InvalidArgument.
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
    if (reductionOp == ElementWiseOperator::opArgmin)
        return Argmin();
    if (reductionOp == ElementWiseOperator::opArgmax)
        return Argmax();
    InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
    return -1; // not reached: InvalidArgument throws
}
// Tensor-shaped argmin/argmax of 'a' into 'this', with dims/strides as given.
// A single-element output is the whole-matrix case and is answered by ArgOp;
// otherwise the runtime count of regular (output) dimensions (0..2) is mapped
// to the compile-time parameter of TensorArgOpIteration.
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opArgmin &&
reductionOp != ElementWiseOperator::opArgmax)
InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
if (GetNumElements() == 1)
{
Data()[0] = (ElemType) a.ArgOp(reductionOp);
}
else
{
const size_t N = 2;
array<ElemType*, N> pointers = { a.Data(), Data() };
// apply the per-operand element offsets before iterating
for (size_t i = 0; i < N; i++)
pointers[i] += offsets[i];
switch (regularOpDims.size())
{
case 2:
TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 1:
TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 0:
TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size());
}
}
}
// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);
// Support <short>
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
GB_binop__pair_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = 1
// Operator configuration consumed by the #included kernel templates below.
// The PAIR operator returns the constant 1 regardless of its operands, so the
// GETA/GETB loads expand to nothing and GB_BINOP ignores x and y entirely.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
// (empty: PAIR never reads A's values)
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
// (empty: PAIR never reads B's values)
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
// PAIR: z = 1, independent of x, y, and the position (i,j)
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Body is supplied by the shared template; returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B is pre-sliced into B_ntasks tasks (B_ek_slicing) and processed with
// B_nthreads threads by the shared template.
GrB_Info GB (_Cdense_accumB__pair_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points to the scalar, passed type-erased as GB_void.
// (The second 'return (GrB_SUCCESS)' below is unreachable -- the inner block
// already returns -- a harmless artifact of the code generator.)
GrB_Info GB (_Cdense_accumb__pair_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// The work is pre-partitioned into C_ntasks tasks (TaskList) run on
// C_nthreads threads; C_to_M/C_to_A/C_to_B map C's vectors to those of
// M, A, and B. Workspaces declared here are released via GB_FREE_WORK.
GrB_Info GB (_AaddB__pair_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// NOTE(review): all kernels below are compiled out with "#if 0" -- the code
// generator determined they are not needed for this operator/type.  They are
// kept verbatim (dead) so the generated file keeps a uniform structure.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
// PAIR operator: result is the constant 1, inputs are ignored
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// PAIR operator: result is the constant 1, inputs are ignored
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_subassign_04.c | //------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: matrix
// S: constructed
// C: not bitmap: use GB_bitmap_assign instead
// A: any sparsity structure.
#include "GB_subassign_methods.h"
// GB_subassign_04: C(I,J) += A, using the symbolic extraction S = C(I,J).
// No mask, accum present, C not bitmap.  Phase 1 updates existing entries
// (and counts the pending tuples); phase 2 inserts the pending tuples.
GrB_Info GB_subassign_04
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_BinaryOp accum,
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
// GB_EMPTY_TASKLIST declares the task list, workspace pointers, and the
// GB_FREE_ALL error-handling state used by GB_OK below
// (macro from GB_subassign_methods.h)
GB_EMPTY_TASKLIST ;
GB_CLEAR_STATIC_HEADER (S, &S_header) ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
//--------------------------------------------------------------------------
// Time: Close to Optimal. Every entry in A must be visited, and the
// corresponding entry in S must then be found. Time for this phase is
// Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
// time. This method simply traverses all of A+S (like GB_add for
// computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)).
// The only difference is that the traversal of A+S can terminate if A is
// exhausted. Entries in S but not A do not actually require any work
// (unlike Method 02, which must visit all entries in A+S).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
// TODO: phase2 of Method 02 and 04 are identical and could be
// done in a single function.
// Compare with Method 16, which computes C(I,J)<!M> += A, using S.
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
// A is bitmap: entry A(iA,j) lives at position j*Avlen + iA
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && Afound)
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// List A (:,j) has entries. List S (:,j) exhausted.
// every remaining entry of A becomes a pending tuple
task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
// Operator parameters for Reshape.  `shape` is the modern interface;
// `target_shape`/`keep_highest` are deprecated legacy fields kept for
// backward compatibility.
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;
bool keep_highest;
mxnet::Tuple<int> shape;
bool reverse;
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape)
.set_default(mxnet::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
// Equality is used by parameter caching/hashing; compares all fields.
bool operator==(const ReshapeParam &other) const {
return this->target_shape == other.target_shape &&
this->keep_highest == other.keep_highest &&
this->shape == other.shape &&
this->reverse == other.reverse;
}
};
// Expand the user-provided reshape spec `shape` against the input shape
// `dshape` into a concrete output shape.  Special values in `shape`:
//   0  -> copy the corresponding input dim
//  -1  -> infer this dim from the remaining size (at most one allowed)
//  -2  -> copy all remaining input dims
//  -3  -> merge (multiply) the next two input dims
//  -4  -> split one input dim into the next two spec values
// If `reverse` is true the spec is applied from right to left.
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0;   // next input dim to consume
int inf_idx = -1;     // position of the single -1 ("infer") dim, if any
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
// if either source dim is unknown, the merged dim is unknown (-1)
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
// resolve the -1 dim: total input size divided by product of the rest
// (the -1 slot currently holds 1, so it does not affect new_size)
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
// Back-propagate a fully-known output shape `out` into the input shape
// `*in`, filling in at most one unknown input dim.  Returns true when
// `*in` ends up fully known, false when inference is not possible.
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
if (shape_is_known(*in) && shape_is_known(out)) {
return true;
} else if (!shape_is_known(out)) {
return false;
} else {
int zero_axis = -1;            // index of the single unknown input dim
int known_dim_size_prod = 1;   // product of all known input dims
for (int i = 0; i < in->ndim(); i++) {
if (!mxnet::dim_size_is_known(*in, i)) {
if (zero_axis != -1)
return false; // more than 1 unknown dim found.
else
zero_axis = i;
} else {
known_dim_size_prod *= (*in)[i];
}
}
// Fix: the original wrote (*in)[zero_axis] unconditionally.  When
// in->ndim() itself is unknown (-1) the loop never runs, zero_axis
// stays -1, and the write was out of bounds.  Also guard against a
// division by zero when some known dim is 0 (zero-size tensor).
if (zero_axis == -1 || known_dim_size_prod == 0)
return false;
(*in)[zero_axis] = out.Size() / known_dim_size_prod;
return true;
}
}
// Shape-inference function for the Reshape operator.  Prefers the modern
// `shape` parameter; falls back to the deprecated `target_shape`, and as a
// last resort tries to infer the input shape backwards from a known output.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
// deprecated path: target_shape with at most one 0 ("infer") dim
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
// temporarily set to 1 so oshape.Size() is the product of known dims
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
// Shape inference for Flatten: collapse all dims after the first into one,
// producing a 2-D output (batch, prod(rest)).
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape &dshape = (*in_attrs)[0];
if (!shape_is_known(dshape)) return false;
int target_dim = 1;
for (int i = 1; i < dshape.ndim(); ++i) {
target_dim *= dshape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
return true;
}
// Operator parameter for Transpose: the target axis permutation.
// An empty `axes` (ndim 0) means "reverse all axes".
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
*/
// Cache-blocked 2-D matrix transpose on CPU.
// `in` is row x col (row-major); `out` receives the col x row transpose.
// The matrix is processed in 32x32 tiles so each tile's reads and writes
// stay resident in L1 (32 KB / 8-byte elements => 4096 elements; a pair of
// 32x32 tiles fits comfortably), and the outer tile loops are parallelized
// with OpenMP.  The compiler is left to pick the unroll factor for the
// two inner loops.
template<typename DType>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  const index_t tile = 32;
  // Parallelize over tiles; the per-tile loops are kept serial so each
  // thread works on a contiguous cache-friendly region.
  // MSVC's OpenMP does not support collapse, so fall back to a single
  // parallel loop there.
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (index_t rb = 0; rb < row; rb += tile) {
    for (index_t cb = 0; cb < col; cb += tile) {
      // transpose one tile: element (r, c) of in -> (c, r) of out
      for (index_t c = cb; (c < cb + tile) && (c < col); ++c) {
        for (index_t r = rb; (r < rb + tile) && (r < row); ++r) {
          out[c * row + r] = in[r * col + c];
        }
      }
    }
  }
}
// Dispatch a transpose of `src` into `ret` according to the permutation
// `axes`, specialized by rank (0..6) and device.  On GPU, a fused
// "pseudo-2D" kernel is used when the permutation is a block rotation.
template<typename xpu>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType>(ret, src, axes, s);
});
return;
}
#endif
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 0: {
// scalar: plain copy
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s);
Copy(out, in, s);
break;
}
case 1: {
// 1-D: transpose is the identity
Tensor<xpu, 1, DType> in = src.get<xpu, 1, DType>(s);
Tensor<xpu, 1, DType> out = ret.get<xpu, 1, DType>(s);
Copy(out, in, s);
break;
}
case 2: {
mshadow::Tensor<xpu, 2, DType> in = src.FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> out = ret.FlatTo2D<xpu, DType>(s);
if (axes[0] == 1 && axes[1] == 0) {
// true 2-D transpose: use the cache-blocked kernel on CPU
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
out = in.T();
}
} else {
// identity permutation (0, 1): plain copy
Copy(out, in, s);
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
out = transpose(in, axes.get<3>());
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
out = transpose(in, axes.get<4>());
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
out = transpose(in, axes.get<5>());
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
out = transpose(in, axes.get<6>());
break;
}
default:
LOG(FATAL) << "Transpose support at most 6 dimensions";
break;
}
});
}
// matrix transpose
// Forward compute function for the transpose operator.  When no axis
// permutation is given, the axes are reversed (matrix-transpose semantics
// generalized to N dimensions).
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(req[0], kWriteTo) << "Transpose does not support kWriteInplace and kAddTo";
  if (param.axes.ndim() != 0) {
    // explicit permutation supplied by the user
    TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], param.axes);
    return;
  }
  // default: reverse the axis order, e.g. (0,1,2) -> (2,1,0)
  mxnet::TShape reversed(inputs[0].ndim(), -1);
  const int nd = reversed.ndim();
  for (int i = 0; i < nd; ++i) {
    reversed[i] = nd - 1 - i;
  }
  TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], reversed);
}
// Bidirectional shape inference for transpose: computes the output shape
// `ret` from the input, and simultaneously back-propagates any known
// output dims into the input shape `get` through the inverse permutation.
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
CHECK_NE(shp.ndim(), 0) << "Number of dimensions cannot be 0";
CHECK_NE(out_shp.ndim(), 0) << "Number of dimensions cannot be 0";
if (shp.ndim() == -1 && out_shp.ndim() == -1)
return false; // none of the shapes is known
if (out_shp.ndim() > 0 && shp.ndim() > 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
// get = inferred input shape, ret = inferred output shape
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
// default permutation: reverse the axes
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
// NOTE(review): only the upper bound of each axis is checked here;
// negative axis values are not rejected -- confirm callers validate.
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
// Operator parameter for expand_dims: the position of the inserted axis.
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
};
// Bidirectional shape inference for expand_dims: inserts a size-1 axis at
// `param.axis` going forward, and strips it going backward when only the
// output shape is known.
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) {
return false;
}
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
// input rank unknown: derive it from the output rank
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
// negative axis counts from the end (insertion after the last dim)
axis += indim + 1;
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
// forward: copy input dims around the inserted size-1 axis
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
// backward: reconstruct the input shape from the output by dropping axis
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  const int nsteps = param.step.ndim();
  if (nsteps == 0U) return true;
  for (int idx = 0; idx < nsteps; ++idx) {
    const auto& st = param.step[idx];
    if (st.has_value() && st.value() != 1) {
      return false;
    }
  }
  return true;
}
// Storage-type inference for the slice forward op.
// Dense input -> dense output (preferring the MKLDNN path on CPU when the
// slice configuration is supported); CSR input -> CSR output, but only when
// the step is trivial (absent or 1).  Anything else falls back to dense.
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int>* in_attrs,
                                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto& in_stype = in_attrs->at(0);
  auto& out_stype = out_attrs->at(0);
  bool dispatched = false;
  const auto dispatch_ex = DispatchMode::kFComputeEx;
  // If step = 1, no need to fallback; otherwise fallback to dense
  bool trivial_step = false;
  if (param.step.ndim() == 0U) {
    trivial_step = true;
  } else if (param.step.ndim() == 1U
      && (!param.step[0].has_value() || param.step[0].value() == 1)) {
    trivial_step = true;
  }
  if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
    // Prefer the MKLDNN kernel when it supports this slice configuration.
    if (dev_mask == Context::kCPU && MKLDNNEnvSet()
        && SupportMKLDNNSlice(param)) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, dispatch_ex);
    }
#endif
    if (!dispatched) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
  }
  if (!dispatched && in_stype == kCSRStorage && trivial_step) {
    dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched) {
    // Unknown/unsupported combination: fall back to dense dispatch.
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
// slice the indptr of a csr
// Kernel: out[i] = in[i] - *base, i.e. rebase each indptr entry so the
// sliced row pointer array starts at zero.
struct SliceCsrIndPtr {
  template<typename IType>
  MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
  }
};
/*
* a wrapper to launch SliceCsrIndPtr kernel.
* slice [src[begin] .. src[end]) and store in dst[0, end - begin)
*/
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
const IType* src, IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies rows [begin[0], end[0]) of `in` into `out`: rebases the indptr,
 * then copies the corresponding ranges of the column indices and values.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row = begin[0];
  nnvm::dim_t end_row = end[0];
  nnvm::dim_t indptr_len = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        // Rebase indptr so that out_indptr[0] == 0.
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
        Stream<xpu> *s = ctx.get_stream<xpu>();
        // Read back the slice's nnz (a device->host copy when xpu == gpu).
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx = out.aux_data(kIdx).dptr<IType>();
        DType* in_data = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // Offset of the first retained nonzero inside the input idx/data arrays.
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
      });
    });
  });
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
  /*!
   * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
   * \param i loop index (one input/output row per kernel invocation)
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   *        (must already hold the per-row output offsets)
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column indice
   * \param end_col end column indice
   */
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    RType ind = out_indptr[i];
    for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
      // indices of CSRNDArray are in ascending order per row
      if (in_idx[j] >= end_col) {
        break;
      } else if (in_idx[j] >= begin_col) {
        // Keep this entry, shifting its column index into the slice frame.
        out_idx[ind] = in_idx[j] - begin_col;
        out_data[ind] = in_data[j];
        ind++;
      }
    }
  }
};
/*
 * Slice a CSR NDArray for two dimensions
 * (forward declaration only; the device-specific implementations live in the
 * corresponding .cc / .cu translation units)
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
// FComputeEx entry point for slice on non-default storage.
// Currently only CSR inputs are supported here; dense inputs take the
// regular FCompute path selected by SliceForwardInferStorageType.
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // Fixed message: the original lacked a space before the storage type value.
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
// Resolves the user-supplied (possibly negative or missing) begin/end/step
// tuples into concrete per-axis ranges over `dshape`, numpy-slicing style.
// Axes not mentioned in the param default to the full range with step 1.
// Returns true when the resolved output is zero-sized on some axis
// (begin == end), false otherwise.
// NOTE(review): the original comment below claimed the opposite polarity;
// callers treat a true return as "zero-sized, nothing to do".
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // Returns true if output is zero-sized, false otherwise.
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim())
    << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim())
    << "Static array size=" << ndim
    << " is not equal to data shape ndim=" << dshape.ndim();
  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim())
      << "step and begin must have the same length";
  }
  for (int i = 0; i < param_begin.ndim(); ++i) {
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // Missing begin/end default to the extremes appropriate for the sign
      // of the step (numpy semantics).
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
      if (b < 0) {
        b += len;
      }
      if (e < 0 && param_end[i].has_value()) {
        e += len;
      }
      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      // Zero-length axis: the only valid range is empty.
      b = 0;
      e = 0;
    }
    (*begin)[i] = b;
    (*end)[i] = e;
    (*step)[i] = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }
  // Axes beyond the param tuples keep their full extent with step 1.
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i] = dshape[i];
    (*step)[i] = 1;
  }
  return zero_size_shape;
}
// Sets oshape[i] to the number of elements selected on axis i by the
// resolved range [b, e) with (nonzero) stride s, as produced by GetIndexRange.
// An unknown input dim yields an unknown (-1) output dim.
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  index_t span = 0;
  if (b >= 0 && e != b) {
    if (s > 0 && e > b) {
      span = (e - b - 1) / s + 1;       // ceil((e - b) / s)
    } else if (s < 0 && e < b) {
      span = (b - e - 1) / (-s) + 1;    // ceil((b - e) / -s)
    }
  }
  (*oshape)[i] = span;
}
// Shape inference for the slice op: resolves begin/end/step against the
// input shape and sizes each sliced axis accordingly.
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  // Start from the input shape; only axes named in the param change size.
  mxnet::TShape oshape = dshape;
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
// Forward kernel for slice, specialized per device: the GPU variant is
// launched with one thread per output element, while the CPU variant below
// processes one row of the flattened-to-2D output per invocation.
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the index of an element of the flattened output tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    const index_t j = i % out_last_dim_size;  // position within the last axis
    index_t irow = 0;  // row id of flattend 2D data
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    // Decode the output coordinate axis by axis (last axis excluded) and
    // accumulate the corresponding flattened input row index.
#pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t out_offset = i * out_last_dim_size;
    // Copy one whole output row; each element maps back into the input via
    // the per-axis begin/step offsets.
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D data
      index_t stride = 1;
      index_t idx = i;
      // Decode the row coordinate axis by axis and accumulate the
      // corresponding flattened input row index.
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++], req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};
// Forward compute for slice on dense tensors: resolves the index ranges and
// launches the device-specific slice_forward kernel.
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (out.Size() == 0) return;  // empty slice: nothing to write
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel works per output row; GPU kernel per output element.
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Scatter kernel used by slice-assign and slice backward: writes `val`
// (shaped like the slice) into `out` (shaped like the full tensor) at the
// positions selected by begin/step.  Specialized per device: the CPU variant
// processes one row of the flattened-to-2D `val` per invocation, the GPU
// variant below one element.
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
      // Decode the value-row coordinate and accumulate the target row in out.
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val[offset++]);
    }
  }
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // i is the index of an element of the flattened value tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    const index_t j = i % out_last_dim_size;  // position within the last axis
    index_t irow = 0;  // row id of flattend 2D out
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    // Decode the value coordinate and accumulate the target row in out.
#pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                  req, val[i]);
  }
};
// Backward pass of slice: zero-fills the input gradient (for kWriteTo) and
// scatters the output gradient back into the sliced positions via the
// slice_assign kernel.
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];   // gradient w.r.t. the slice output
  const TBlob& igrad = outputs[0];  // gradient w.r.t. the slice input
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t (was int): consistent with SliceOpForward and avoids
        // overflow when rows * last-dim exceeds INT_MAX on GPU.
        size_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Shape inference for _slice_assign (data[slice] = val):
// the output keeps the data shape; the value input must match the shape of
// the selected slice.
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      // index_t (was int): avoids narrowing large dims; matches SliceOpShape.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Forward compute for _slice_assign: copies `data` into `out` (unless done
// in place), then scatters `val` into the sliced region.
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val = inputs[1];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Start from an exact copy of the input data.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t (was int): consistent with SliceOpForward and safe against
        // overflow when rows * last-dim exceeds INT_MAX on GPU.
        size_t num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), val.dptr<DType>(),
            out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameter for _slice_assign_scalar: assigns a single scalar value to the
// region selected by begin/end/step.
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value written into every selected element
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
// Shape inference for _slice_assign_scalar: the output shape is simply the
// data shape (the scalar fills a sub-region; no shape change).
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& data_shape = in_attrs->front();
  if (!shape_is_known(data_shape)) return false;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, data_shape);
  return true;
}
// Kernel for _slice_assign_scalar: writes the scalar `val` into every element
// of `out` selected by begin/step; vshape is the shape of the selected region.
template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
      // Decode the region-row coordinate and accumulate the target row in out.
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
// Forward compute for _slice_assign_scalar: copies the input data to the
// output (unless done in place), then fills the selected region with the
// scalar value.
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Start from an exact copy of the input data.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  mxnet::TShape vshape = data.shape_;  // shape of the assigned sub-region
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      // index_t (was int): avoids narrowing large dims before passing them
      // on to SetSliceOpOutputDimSize, whose parameters are index_t.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
// Parameter for slice_axis: slices a single axis over [begin, end).
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;                      // axis to slice; negative counts from the end
  index_t begin;                 // inclusive start; negative wraps around
  dmlc::optional<index_t> end;   // exclusive end; unset means "to the end"
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                " supports negative indexes.");
  }
};
// Normalizes the slice_axis parameters against `ishape`: resolves a negative
// axis, wraps negative begin/end, defaults a missing end to the axis size,
// and validates that the resulting range is non-empty and in bounds.
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  // Message fix: the check accepts axis == 0, so "larger than zero" was
  // inaccurate, and "Recieved" was misspelled.
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and not less than zero! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      // Missing end defaults to the full axis extent.
      *end = axis_size;
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    // Zero-length axis: the only valid slice is empty.
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
// Shape inference for slice_axis: identical to the input shape except the
// sliced axis shrinks to (end - begin).
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // Cannot size the sliced axis yet; propagate what we know so far.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  // Copy the input shape and shrink only the sliced axis.
  mxnet::TShape oshape(ishape);
  oshape[axis] = static_cast<index_t>(end - begin);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
// Forward compute for slice_axis: flattens the tensor so the sliced axis is
// the middle (or last) dimension, then applies mshadow's slice expression.
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Slicing the last axis: a 2D flatten suffices.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> in =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> out =
            outputs[0].FlatTo2D<xpu, DType>(s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  } else {
    // Interior axis: flatten to 3D with the sliced axis in the middle.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> in =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> out =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  }
}
// Backward pass of slice_axis: routes the output gradient back into the
// sliced region of the input gradient (zeroing the rest for kWriteTo).
// (The original comment said "broadcast"; this is the slice_axis gradient.)
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // Last axis: 2D flatten is enough.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> ograd =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> igrad =
            outputs[0].FlatTo2D<xpu, DType>(s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;  // outside the slice the gradient is zero
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  } else {
    // Interior axis: flatten to 3D with the sliced axis in the middle.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> ograd =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> igrad =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;  // outside the slice the gradient is zero
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  }
}
// Parameter for slice_like: slices the first input so the listed axes match
// the sizes of the second input.
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // empty tuple means "slice on all axes"
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};
// Shape inference for slice_like: the output takes the second input's size
// on each listed axis (all axes when the list is empty) and the first
// input's size elsewhere.
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    // Message fixes: this op is slice_like (not slice_axis), and the size
    // messages were missing a space before "exceeds".
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_like performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
// Translates slice_like's axes into explicit begin/end/step tuples over
// `dshape` so the regular slice kernels can be reused: the listed axes get
// [0, fshape[axis]) with step 1; all other axes remain unset (full range).
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // Empty axes list: slice every axis to the second input's size.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      // Message fix: fshape belongs to the second input, but the original
      // copy-pasted "first input" here.
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
// Forward compute for slice_like: derives begin/end/step from the second
// input's shape, then runs the regular slice_forward kernel.
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;  // only its shape is used
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel works per output row; GPU kernel per output element.
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
            num_threads, out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  // Backward pass of slice_like: scatter the output gradient (inputs[0])
  // back into the first input's gradient (outputs[0]); the second input
  // only provides a shape, so its gradient is zero.
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Zero the full gradient first; the kernel only writes the sliced region.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  // BUG FIX: the address-of operators on the three out-parameters were
  // mangled and did not compile; pass pointers to the range tuples.
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameters for the `clip` operator: clamp every element into [a_min, a_max].
struct ClipParam : public dmlc::Parameter<ClipParam> {
real_t a_min, a_max;  // lower / upper clamp bounds
DMLC_DECLARE_PARAMETER(ClipParam) {
DMLC_DECLARE_FIELD(a_min)
.describe("Minimum value");
DMLC_DECLARE_FIELD(a_max)
.describe("Maximum value");
}
};
// Elementwise clamp kernel: out[i] = datas[i] limited to [a_min, a_max].
// The upper bound is tested first, matching the original branch order
// (relevant only in the pathological a_min > a_max case).
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType v = datas[i];
    if (v > a_max) {
      out[i] = a_max;
    } else if (v < a_min) {
      out[i] = a_min;
    } else {
      out[i] = v;
    }
  }
};
// Gradient of clip: the incoming gradient flows through wherever the input
// was not clipped; clipped positions (outside [a_min, a_max]) get zero.
// NaN inputs fail both comparisons and pass the gradient through, exactly
// as the original if/else-if chain did.
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType v = datas[i];
    out[i] = (v > a_max || v < a_min) ? DType(0) : grad[i];
  }
};
// Dense forward implementation of `clip`: launch the elementwise clamp
// kernel over the whole output buffer.
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  // Input and output dtypes must agree; the kernel copies element-for-element.
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *stream = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(
        stream, outputs[0].Size(), outputs[0].dptr<DType>(),
        inputs[0].dptr<DType>(), param.a_min, param.a_max);
  });
}
// Clip for non-default (sparse) storage: validates dtype/storage agreement
// and forwards to the dense Clip kernel over the stored values via
// UnaryOp::MapToFCompute.
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
// Dense arrays must take the plain Clip path instead.
CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
// Backward pass of `clip`: inputs[0] is the output gradient, inputs[1] the
// original data; outputs[0] receives the masked gradient.
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *stream = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(
        stream, outputs[0].Size(), outputs[0].dptr<DType>(),
        inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
        param.a_min, param.a_max);
  });
}
/*!
* \brief The parameters of the repeat operator include
* the number of times to repeat and the axis (optional).
* The parameters will be later used to deduce the
* output ndarray shape in bool RepeatShape() function.
*/
// Parameters for `repeat`: each element is copied `repeats` times, either
// along a single `axis` or over the flattened input when axis is unset.
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
int repeats = 1;  // number of copies of each element
dmlc::optional<int> axis;  // unset => operate on the flattened array
DMLC_DECLARE_PARAMETER(RepeatParam) {
DMLC_DECLARE_FIELD(repeats)
.describe("The number of repetitions for each element.");
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>())
.describe("The axis along which to repeat values."
" The negative numbers are interpreted counting from the backward."
" By default, use the flattened input array,"
" and return a flat output array.");
}
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
// Extract and sanity-check the user-supplied repeat parameters.
// Ensures repeats >= 0 and, when an axis is given, that the (possibly
// negative) axis normalizes into [0, ndim).
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (static_cast<bool>(*axisOpt)) {
    const int ndim = ishape.ndim();
    int ax = axisOpt->value();
    if (ax < 0) {
      ax += ndim;  // normalize negative axis
    }
    // Report the original user value in the error message.
    CHECK(ax >= 0 && ax < ndim) << "axis = " << axisOpt->value() << " out of bounds";
  }
}
// Shape inference for `repeat`. With an axis, that axis's length is scaled
// by `repeats`; without one, the result is a flat array of size
// ishape.Size() * repeats. repeats == 0 produces an empty 1-D array.
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (repeats == 0) {
    // Empty 1-dim, 0-size result.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ishape.ndim();
    }
    // Copy the input shape and scale only the repeated axis.
    mxnet::TShape oshape(ishape);
    oshape[axis] = repeats * ishape[axis];
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  } else {
    // No axis: flat 1-D output of size in.Size() * repeats.
    mxnet::TShape oshape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  }
  return shape_is_known(out_attrs->at(0));
}
// Type inference for `repeat`: propagate whichever side (input or output)
// already has a known dtype to the other.
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  const int out_type = (*out_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_type != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator repeat.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
// Build the (input-view, broadcast-target) shape pair that turns `repeat`
// into a broadcast: a length-1 axis is inserted right after the repeated
// axis in the input view, and the target shape sets that axis to `repeats`.
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
const mxnet::TShape& ishape,
const dmlc::optional<int>& axisOpt,
const int repeats) {
if (static_cast<bool>(axisOpt)) {
int axis = axisOpt.value();
int ndim = ishape.ndim();
if (axis < 0) {
axis += ndim;
}
CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
// reshape the input tensor by adding a dim at the (axis+1)-th dim
mxnet::TShape rshape(ishape.ndim()+1, 1);
// the shape we want to broadcast to
mxnet::TShape bshape(rshape.ndim(), 1);
// Copy dims [0, axis] unchanged into both shapes.
int i = 0;
while (i <= axis) {
rshape[i] = bshape[i] = ishape[i];
++i;
}
// Inserted axis: size 1 in the view, `repeats` in the broadcast target.
rshape[i] = 1;
bshape[i] = repeats;
// Remaining dims shift right by one.
while (i < ishape.ndim()) {
rshape[i+1] = ishape[i];
bshape[i+1] = ishape[i];
++i;
}
return std::make_pair(rshape, bshape);
} else {
// axis is not input by user
// reshape the tensor into shape (ishape.Size(), 1)
// then add one dim at axis = 1 and broadcast to
// shape (ishape.Size(), repeats)
mxnet::TShape rshape(2, 1);
rshape[0] = ishape.Size();
rshape[1] = 1;
mxnet::TShape bshape(2, 1);
bshape[0] = rshape[0];
bshape[1] = repeats;
return std::make_pair(rshape, bshape);
}
}
// Forward pass of `repeat`, implemented as a broadcast: view the input and
// output through the shapes computed by ReshapeInputOutputForRepeatOp and
// delegate to BroadcastCompute.
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const mxnet::TShape& ishape = inputs[0].shape_;
  if (!shape_is_known(ishape)) return;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (repeats == 0) return;  // empty output: nothing to compute
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // Reshaped views over the same underlying buffers.
  TBlob in_view(inputs[0].dptr_, shapes.first, inputs[0].dev_mask(),
                inputs[0].type_flag_, inputs[0].dev_id());
  TBlob out_view(outputs[0].dptr_, shapes.second, outputs[0].dev_mask(),
                 outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> new_inputs = {in_view};
  std::vector<TBlob> new_outputs = {out_view};
  BroadcastCompute<xpu>(attrs, ctx, new_inputs, req, new_outputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
// Backward pass of `repeat`: the forward was a broadcast, so the gradient
// is a sum-reduction of the output gradient back to the input-view shape.
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (repeats == 0) return;
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // Input-gradient view uses the (small) first shape; the incoming output
  // gradient is viewed through the (broadcast) second shape.
  TBlob igrad_view(outputs[0].dptr_, shapes.first, outputs[0].dev_mask(),
                   outputs[0].type_flag_, outputs[0].dev_id());
  TBlob ograd_view(inputs[0].dptr_, shapes.second, inputs[0].dev_mask(),
                   inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> new_outputs = {igrad_view};
  std::vector<TBlob> new_inputs = {ograd_view};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, new_inputs, req, new_outputs, shapes.first);
}
// Parameters for `tile`: the per-axis repetition counts.
struct TileParam : public dmlc::Parameter<TileParam> {
mxnet::Tuple<int> reps;  // repetition count per axis (aligned to trailing dims)
DMLC_DECLARE_PARAMETER(TileParam) {
DMLC_DECLARE_FIELD(reps)
.describe("The number of times for repeating the tensor a. Each dim size of reps"
" must be a positive integer."
" If reps has length d, the result will have dimension of max(d, a.ndim);"
" If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
}
};
// Shape inference for `tile`: align ishape and reps at the trailing
// dimension and multiply where both are present; unmatched leading dims
// come from whichever of the two is longer.
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!shape_is_known(ishape)) {
return false;
}
const mxnet::Tuple<int>& reps = param.reps;
// If reps is empty, return a identical input array
if (reps.ndim() == 0) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return true;
}
mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
// Walk both shapes from the last dimension backwards.
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = oshape.ndim() - 1; i >= 0; --i) {
if (i1 >= 0 && i2 >= 0) {
oshape[i] = ishape[i1--] * reps[i2--];
} else if (i1 >= 0) {
oshape[i] = ishape[i1--];
} else if (i2 >= 0) {
oshape[i] = reps[i2--];
}
}
// If reps contains 0s, oshape is a zero-size shape.
// Need to distinguish between np_shape mode and legacy mode.
if (!Imperative::Get()->is_np_shape()) {
common::ConvertToNumpyShape(&oshape);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
// Type inference for `tile`: propagate whichever side (input or output)
// already has a known dtype to the other.
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  const int out_type = (*out_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_type != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
// Build the (input-view, broadcast-target) shape pair that turns `tile`
// into a broadcast: interleave a length-1 axis before every data axis in
// the view; the broadcast target sets those even-indexed axes to the
// matching reps values (aligned from the trailing dimension).
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
const mxnet::TShape& ishape,
const mxnet::Tuple<int>& reps) {
if (reps.ndim() == 0) {
return std::make_pair(ishape, ishape);
}
// The shape we want to broadcast to
mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
// The shape of the input tensor after adding new axes before each dim
mxnet::TShape rshape(bshape.ndim(), 1);
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = bshape.ndim() - 1; i >= 0; --i) {
// Even positions carry the reps factor; odd positions carry the data dim.
if (0 == (i & 1)) {
bshape[i] = (i2 >= 0? reps[i2--] : 1);
rshape[i] = 1;
} else {
rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
}
}
return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
// Forward pass of `tile`, implemented as a broadcast over the interleaved
// shapes produced by ReshapeInputOutputForTileOp.
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // Any zero in reps makes the output empty; bail out early.
  for (int j = 0; j < reps.ndim(); ++j) {
    if (reps[j] == 0) return;
  }
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForTileOp(ishape, reps);
  // Reshaped views over the same underlying buffers.
  TBlob in_view(inputs[0].dptr_, shapes.first, inputs[0].dev_mask(),
                inputs[0].type_flag_, inputs[0].dev_id());
  TBlob out_view(outputs[0].dptr_, shapes.second, outputs[0].dev_mask(),
                 outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> new_inputs = {in_view};
  std::vector<TBlob> new_outputs = {out_view};
  BroadcastCompute<xpu>(attrs, ctx, new_inputs, req, new_outputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
// Backward pass of `tile`: the forward was a broadcast, so the gradient is
// a sum-reduction of the output gradient back to the input-view shape.
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // Any zero in reps means there was no data to tile; nothing to reduce.
  for (int j = 0; j < reps.ndim(); ++j) {
    if (reps[j] == 0) return;
  }
  const std::pair<mxnet::TShape, mxnet::TShape> shapes =
      ReshapeInputOutputForTileOp(oshape, reps);
  // Input-gradient view uses the (small) first shape; the incoming output
  // gradient is viewed through the (broadcast) second shape.
  TBlob igrad_view(outputs[0].dptr_, shapes.first, outputs[0].dev_mask(),
                   outputs[0].type_flag_, outputs[0].dev_id());
  TBlob ograd_view(inputs[0].dptr_, shapes.second, inputs[0].dev_mask(),
                   inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> new_outputs = {igrad_view};
  std::vector<TBlob> new_inputs = {ograd_view};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, new_inputs, req, new_outputs, shapes.first);
}
// Parameters for `reverse`: the axes along which elements are reversed.
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
mxnet::Tuple<int> axis;  // one or more axes to reverse
DMLC_DECLARE_PARAMETER(ReverseParam) {
DMLC_DECLARE_FIELD(axis)
.describe("The axis which to reverse elements.");
}
};
#define REVERSE_MAX_DIM 10U
// Kernel functor that reverses a tensor along one or more axes by mapping
// each linear source index to its mirrored destination index.
struct reverse {
// Compute the mirrored linear index for `idx`. For each reversed axis i,
// stride_[i] is that axis's size and trailing_[i] is the product of the
// dimensions after it (the axis's linear stride); the axis coordinate x is
// replaced by (size - 1 - x).
MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
index_t nreversedim,
const index_t * stride_,
const index_t * trailing_) {
index_t outputIndex = idx;
for (index_t i = 0; i < nreversedim; ++i) {
const index_t low = outputIndex % trailing_[i];
index_t high = outputIndex / trailing_[i];
const index_t x = high%stride_[i];
high /= stride_[i];
outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
}
return outputIndex;
}
#ifdef __CUDACC__
// GPU variant: stage stride_/trailing_ into shared memory before use.
template<typename DType>
__device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
const index_t * stride_,
const index_t * trailing_) {
__shared__ index_t stride_share[REVERSE_MAX_DIM];
__shared__ index_t trailing_share[REVERSE_MAX_DIM];
// NOTE(review): the first REVERSE_MAX_DIM threads copy REVERSE_MAX_DIM
// entries even when nreversedim is smaller; confirm the source buffers
// are sized for REVERSE_MAX_DIM reads (ReverseOpForward allocates only
// nreversedim entries per array).
if (threadIdx.x < REVERSE_MAX_DIM) {
stride_share[threadIdx.x] = stride_[threadIdx.x];
trailing_share[threadIdx.x] = trailing_[threadIdx.x];
}
__syncthreads();
index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
dst[new_idx] = src[index];
}
#else
// CPU variant: read the index arrays directly.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
const index_t * stride_,
const index_t * trailing_) {
index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
dst[new_idx] = src[index];
}
#endif
};
// Forward pass of `reverse`: precompute each reversed axis's size (stride_)
// and linear stride (trailing_), then launch the `reverse` kernel. On GPU,
// the two arrays are first copied into a device workspace.
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
// The shared-memory staging in the GPU kernel caps the reversed-axis count.
CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
Stream<xpu> *s = ctx.get_stream<xpu>();
const mxnet::TShape& ishape = inputs[0].shape_;
std::vector<index_t> stride_(param.axis.ndim());
std::vector<index_t> trailing_(param.axis.ndim());
index_t reverse_index = 0;
for (int axis : param.axis) {
CHECK_LT(axis, ishape.ndim());
stride_[reverse_index] = ishape[axis];
trailing_[reverse_index] = 1;
// trailing_ = product of all dimensions after `axis` (linear stride).
for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
trailing_[reverse_index] *= ishape[i2];
}
reverse_index++;
}
#ifdef __CUDACC__
// Copy the host-side stride/trailing arrays into a device workspace.
// NOTE(review): only reverse_index entries per array are allocated here,
// but the GPU kernel stages REVERSE_MAX_DIM entries into shared memory --
// confirm the over-read past the workspace is benign.
mshadow::Tensor<xpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
});
#else
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
stride_.data(), trailing_.data());
});
#endif
}
// Parameters for `stack`: the new axis position and the input count.
struct StackParam : public dmlc::Parameter<StackParam> {
int axis;  // position of the inserted axis in the output
int num_args;  // number of arrays to stack
DMLC_DECLARE_PARAMETER(StackParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(0)
.describe("The axis in the result array along which the input arrays are stacked.");
DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
.describe("Number of inputs to be stacked.");
}
};
// Shape inference for `stack`: all inputs must share one shape `dshape`;
// the output inserts a new axis of length num_args at `axis`.
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  mxnet::TShape dshape;
  // Merge all input shapes; they must be mutually compatible.
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // CONSISTENCY FIX: use a plain int index like the loop above; the
  // original used index_t here, mixing types in the comparison against
  // the int-valued ndim().
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i - 1];  // dims after the new axis shift right by one
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
// Forward pass of `stack`: collapse the output to (leading, num_inputs,
// trailing) and each input to (leading, 1, trailing); stacking then reduces
// to a concatenation along the middle axis.
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  const int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    size_t lead = 1, trail = 1;
    for (int i = 0; i < axis; ++i) {
      lead *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trail *= outputs[0].shape_[i];
    }
    const size_t mid = outputs[0].shape_[axis];
    Tensor<xpu, 3, DType> out =
        outputs[0].get_with_shape<xpu, 3, DType>(Shape3(lead, mid, trail), s);
    std::vector<Tensor<xpu, 3, DType> > parts(inputs.size());
    for (size_t i = 0; i < inputs.size(); ++i) {
      parts[i] = inputs[i].get_with_shape<xpu, 3, DType>(Shape3(lead, 1, trail), s);
    }
    Concatenate(parts, &out, 1, req[0]);
  })
}
// Backward pass of `stack`: the inverse of the forward concat -- split the
// collapsed (leading, num_inputs, trailing) gradient along the middle axis
// into one (leading, 1, trailing) slice per input.
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  const int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    size_t lead = 1, trail = 1;
    for (int i = 0; i < axis; ++i) {
      lead *= inputs[0].shape_[i];
    }
    for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
      trail *= inputs[0].shape_[i];
    }
    const size_t mid = inputs[0].shape_[axis];
    Tensor<xpu, 3, DType> grad =
        inputs[0].get_with_shape<xpu, 3, DType>(Shape3(lead, mid, trail), s);
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    for (size_t i = 0; i < outputs.size(); ++i) {
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(Shape3(lead, 1, trail), s);
    }
    Split(grad, &grad_in, 1, req);
  })
}
// Parameters for `squeeze`: optional subset of size-1 axes to remove.
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
dmlc::optional<mxnet::Tuple<int>> axis;  // unset => squeeze all size-1 axes
DMLC_DECLARE_PARAMETER(SqueezeParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<mxnet::Tuple<int>>())
.describe("Selects a subset of the single-dimensional entries in the shape."
" If an axis is selected with shape entry greater than one, an error is raised.");
}
};
// Given a shape that may have dim sizes equal to -1 (marking squeezed-out
// dims), move all the -1 entries to the end of the shape array
// and keep the relative order of the remaining values.
// Returns the number of remaining (non -1) dimensions.
// Stable-partition the shape in place: keep every dim that is not the -1
// sentinel (in order) at the front, and return how many such dims remain.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  size_t removed = 0;
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] == -1) {
      ++removed;  // sentinel: this dim was squeezed out
    } else {
      // Shift the kept dim left past the sentinels seen so far.
      std::swap((*shape)[i], (*shape)[i - removed]);
    }
  }
  return shape->ndim() - removed;
}
// Shape inference for `squeeze`: mark each removed axis with -1, compact
// the remaining dims with SqueezeShapeHelper, and handle the all-ones
// corner case by keeping a single size-1 axis.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = in_attrs->at(0);
const int dndim = dshape.ndim();
if (!shape_is_known(dshape)) return false;
mxnet::TShape oshape = dshape;
if (param.axis.has_value()) {
// preprocess axis
mxnet::Tuple<int> axes = param.axis.value();
for (int i = 0; i < axes.ndim(); ++i) {
if (axes[i] < 0) {
axes[i] += dndim;
CHECK_GE(axes[i], 0)
<< "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
}
CHECK_LT(axes[i], dndim)
<< "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
// Only size-1 axes may be squeezed.
CHECK_EQ(dshape[axes[i]], 1)
<< "cannot select an axis to squeeze out which has size="
<< dshape[axes[i]] << " not equal to one";
// An axis already marked -1 was listed twice.
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
oshape[axes[i]] = -1;
}
} else {
// No axes given: squeeze every size-1 axis.
for (int i = 0; i < oshape.ndim(); ++i) {
if (oshape[i] == 1) oshape[i] = -1;
}
}
size_t oshape_size = SqueezeShapeHelper(&oshape);
if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1)
oshape[0] = 1;
oshape_size = 1;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
return true;
}
// Parameters for depth_to_space / space_to_depth: the spatial block size.
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
        // FIX: user-facing description had a period instead of a comma
        // ("[block_size. block_size]").
        .describe("Blocks of [block_size, block_size] are moved");
  }
};
// Shape inference for depth_to_space on an NCHW tensor:
// (N, C, H, W) -> (N, C / block^2, H * block, W * block).
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // BUG FIX: the message promises a positive value but the old check
  // (CHECK_NE(block, 0)) admitted negative block sizes, which would
  // silently corrupt the shape arithmetic below.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
      << "Cannot perform Depth To Space operation on the specified tensor."
         " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
      << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
      << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
      << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  // Both spatial dimensions grow by a factor of `block`.
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] * block;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for depth_to_space: propagate the dtype in both
// directions so whichever side is already known wins.
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
// Peel one dimension off the mixed-radix output index *idx: the digit for
// this dimension (idx mod dim_size) is scaled by the input tensor's linear
// offset for `index_position` and accumulated into *inp_index, and *idx is
// reduced to the remaining higher-order digits.
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  const index_t quotient = *idx / dim_size;
  const index_t digit = *idx - quotient * dim_size;  // == *idx % dim_size
  *inp_index += digit * offset_arr[index_position];
  *idx = quotient;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
// Decompose output index i into the transposed dimension order
// (w-in-block, w, h-in-block, h, depth, batch) and accumulate the
// corresponding input offsets; the order of the update_index calls
// realizes the (0,1,2,3,4,5) -> (0,3,4,1,5,2) permutation.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
const int block, const index_t* size, const index_t* offset_arr) {
index_t inp_index = 0, idx = i, dim_size;
dim_size = block;  // innermost: block column
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[3];  // input width
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;  // block row
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2];  // input height
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1] / (block * block);  // output depth
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];  // batch
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
// Single-thread kernel (launched with one element): record the input
// dimension sizes and the linear offsets of the six logical axes
// (batch, block-row, block-col, depth, height, width) used by
// depth_to_space_forward.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1, const index_t size2,
const index_t size3) {
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
// Offsets are built innermost-out: width has stride 1, each outer axis
// multiplies by the extent of the axis inside it.
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * size[3];
offset_arr[3] = offset_arr[4] * size[2];
offset_arr[2] = offset_arr[3] * size[1] / (block * block);
offset_arr[1] = offset_arr[2] * block;
offset_arr[0] = offset_arr[1] * block;
}
};
// Forward pass of depth_to_space: compute the per-axis offsets into a
// 10-entry index_t workspace (6 offsets + 4 sizes), then launch the
// index-remapping kernel over the output.
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
// Workspace layout: offset_arr[0..5] followed by size[0..3].
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
// One-element launch fills the offset/size tables on the right device.
Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
// Shape inference for space_to_depth.
// Input must be a 4D tensor (N, C, H, W) with H and W divisible by `block`;
// output shape is (N, C * block^2, H / block, W / block).
// Fixes vs. previous version: the divisibility error messages wrongly said
// "Depth To Space" (copy-paste from DepthToSpaceOpShape), and the block_size
// check claimed "positive" while only rejecting zero (now CHECK_GT).
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
      << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
      << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_EQ(in_shape[2] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
      << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  // Spatial blocks move into the depth axis; spatial dims shrink by `block`.
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for space_to_depth: the single output shares the input's
// dtype, and vice versa (bidirectional so either side may be known first).
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
// Inference succeeds only once the output dtype is resolved (-1 = unknown).
return out_attrs->at(0) != -1;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
// Kernel: computes one output element of space_to_depth.
// Decodes the output linear index `i` into the six logical coordinates of
// the transposed view described above and accumulates the matching input
// linear offset via update_index; the call order fixes the permutation.
template<int req>
struct space_to_depth_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
const index_t* size, const index_t* offset_arr) {
// idx is peeled one axis at a time (innermost output axis first);
// inp_index accumulates coordinate * stride contributions.
index_t inp_index = 0, idx = i, dim_size;
dim_size = size[3] / block;
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2] / block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1];
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing space_to_depth operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
// Single-thread kernel that records the input tensor's dimensions in `size`
// and the linear strides of the intermediate six-axis view used by
// space_to_depth in `offset_arr` (innermost stride last).
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    // (i is unused: this kernel is launched with a single thread.)
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Build the strides from the innermost axis outward, then publish them.
    const DType stride5 = 1;
    const DType stride4 = stride5 * block;
    const DType stride3 = stride4 * size[3] / block;
    const DType stride2 = stride3 * block;
    const DType stride1 = stride2 * size[2] / block;
    const DType stride0 = stride1 * size[1];
    offset_arr[5] = stride5;
    offset_arr[4] = stride4;
    offset_arr[3] = stride3;
    offset_arr[2] = stride2;
    offset_arr[1] = stride1;
    offset_arr[0] = stride0;
  }
};
// Forward pass of space_to_depth: computes per-axis strides into a small
// device workspace, then launches a gather kernel (one thread per output
// element). Mirrors DepthToSpaceOpForward with the inverse permutation.
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
// Exactly one input, one output, one write-request descriptor.
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
// Workspace layout: 6 stride entries (offset_arr) followed by the 4
// dimension sizes (size) -- 10 index_t values total.
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
// Single-thread kernel: populates offset_arr and size.
Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
// Element-wise gather into the rearranged output layout.
Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
namespace split_enum {
enum SplitOpInputs {kData};
} // namespace split_enum
// Parameters for the split operator.
struct SplitParam : public dmlc::Parameter<SplitParam> {
mxnet::TShape indices;  // explicit split boundaries along `axis`; used when sections == 0
int axis;  // axis to split along (may be negative, normalized by the ops)
bool squeeze_axis;  // drop the split axis when each section has length 1
int sections;  // number of equal sections; 0 means "split by `indices`"
DMLC_DECLARE_PARAMETER(SplitParam) {
DMLC_DECLARE_FIELD(indices)
.describe("Indices of splits. The elements should denote the boundaries of at which split"
" is performed along the `axis`.");
DMLC_DECLARE_FIELD(axis).set_default(1)
.describe("Axis along which to split.");
DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
.describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
" **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
" only along the `axis` which it is split."
" Also `squeeze_axis` can be set to ``true``"
" only if ``input.shape[axis] == num_outputs``.");
DMLC_DECLARE_FIELD(sections).set_default(0)
.describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
}
}; // struct SplitParam
// Builds the boundary array [0, s, 2s, ..., sections*s] for an equal split
// of ishape[axis] into `sections` parts, where s = ishape[axis] / sections
// (floor division; any remainder is left to downstream range checks).
// Fix: guard against sections == 0, which previously divided by zero.
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  CHECK_GT(sections, 0) << "sections must be a positive integer for an equal split";
  mxnet::TShape indices(sections + 1, -1);
  indices[0] = 0;
  int64_t section_size = ishape[axis] / sections;
  for (int i = 0; i < sections; ++i) {
    indices[i + 1] = section_size * (i + 1);
  }
  return indices;
}
// Type inference for split: every output inherits the (required) input dtype.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Replace whatever was there with num_outputs copies of the input dtype.
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Shape inference for split: assigns each output its section shape (optionally
// squeezing the split axis), then back-propagates a reconstructed input shape.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
// dshape is mutated per-output below; ishape keeps the pristine input shape.
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
mxnet::TShape ishape = in_attrs->at(split_enum::kData);
if (!mxnet::ndim_is_known(dshape)) return false;
if (param.axis >= 0) {
CHECK_LT(param.axis, dshape.ndim());
} else {
CHECK_LT(param.axis + dshape.ndim(), dshape.ndim());
}
// Normalize a negative axis.
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += dshape.ndim();
}
// Boundary indices: computed for equal sections, or taken from the param.
const mxnet::TShape indices =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
// Pre-compute squeezed output shape for future usage
// (shift dims after real_axis left by one, then drop the last slot).
mxnet::TShape squeezed_dshape = dshape;
for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
squeezed_dshape[d] = squeezed_dshape[d+1];
}
squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
// Assign shape to every output
for (int i = 0; i < num_outputs; ++i) {
int start = indices[i];
// The last section extends to the end of the axis.
int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
if (ishape[real_axis] == 0U) {
end = start;
} else {
CHECK(start < end)
<< "start " << start << " is not less than end " << end << "for subarray " << i;
CHECK(end <= ishape[real_axis])
<< "end " << end << " is no less than the size of the axis " << ishape[real_axis];
}
dshape[real_axis] = (end - start);
if (param.squeeze_axis) {
// Squeezing is only legal when every section has length exactly 1.
CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
}
}
// Reconstruct the input shape from the (possibly refined) output shapes and
// propagate it back to in_attrs.
mxnet::TShape back_calculate_dshape = ishape;
back_calculate_dshape[real_axis] = 0;
for (int d = 0; d < real_axis; ++d) {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
if (param.squeeze_axis) {
// Squeezed outputs each contributed one element along the split axis.
back_calculate_dshape[real_axis] = num_outputs;
} else {
for (int i = 0; i < num_outputs; ++i) {
back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
}
}
for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
if (param.squeeze_axis) {
// Output dims after the squeezed axis are shifted left by one.
back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
} else {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
return true;
}
struct SplitKernel {
/*!
* \brief Map function for forward split_v2 operator
* \param i global thread id
* \param in_data ptr to input buffer
* \param out_data ptr to ptr of outputs buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
* \param axis_size size of axis to be splitted on
* \param trailing_size step size within the data buffer of the axis to be splitted on
*/
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
const DType *in_data, DType** out_data, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
// Coordinate of element i along the split axis.
size_t idx = i / trailing_size % axis_size;
// Linear scan for the section containing idx: the largest `section`
// with indices[section] <= idx.
size_t target = 0;
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
target = section++) {}
DType* target_data = out_data[target];
// Position along the split axis relative to the section start.
const size_t mid_idx = idx - indices[target];
// Flattened coordinates over the dims before/after the split axis.
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
// Length of this section along the split axis (indices has a final
// boundary entry, so target + 1 is always valid).
const size_t section_size = indices[target + 1] - indices[target];
// Linear index inside the target output buffer.
const size_t target_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
target_data[target_idx] = in_data[i];
}
};
struct ConcatenateKernel {
/*!
* \brief Map function for backward split_v2 operator
* \param i global thread id
* \param out_grad ptr to ptr of out grads buffer
* \param in_grad ptr to input grad buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
* \param axis_size size of axis to be splitted on
* \param trailing_size step size within the data buffer of the axis to be splitted on
*/
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
DType** out_grad, DType* in_grad, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
// Mirror of SplitKernel: gather in_grad[i] from the right section buffer.
// Coordinate of element i along the split axis.
size_t idx = i / trailing_size % axis_size;
// Linear scan for the section containing idx.
size_t src = 0;
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
src = section++) {}
DType* src_grad = out_grad[src];
// Position along the split axis relative to the section start.
const size_t mid_idx = idx - indices[src];
// Flattened coordinates over the dims before/after the split axis.
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[src + 1] - indices[src];
// Linear index inside the source gradient buffer.
const size_t src_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
in_grad[i] = src_grad[src_idx];
}
};
// Forward pass of split_v2: stages the boundary array and per-output data
// pointers into a single workspace buffer, then scatters each input element
// to its section with SplitKernel.
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& input_data = inputs[split_enum::kData];
// leading/trailing: products of dims before/after the split axis.
// NOTE(review): `leading` is computed but never used below; only `mid` and
// `trailing` are passed to the kernel.
size_t leading = 1, trailing = 1;
// Normalize a negative axis.
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += input_data.ndim();
}
CHECK_LT(real_axis, input_data.ndim());
size_t mid = input_data.shape_[real_axis];
for (int i = 0; i < real_axis; ++i) {
leading *= input_data.shape_[i];
}
for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
trailing *= input_data.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_data.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
// Explicit indices lack the terminal boundary; append the axis length so
// indices[k + 1] is always valid in the kernel.
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
// Collect device pointers of all output buffers.
std::vector<DType*> output_data;
for (const TBlob& data : outputs) {
output_data.push_back(data.dptr<DType>());
}
workspace_size += output_data.size() * sizeof(DType*);
// Workspace layout: [boundary indices][output pointer table].
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(
reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(output_data.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
// One thread per input element scatters to the matching section buffer.
Kernel<SplitKernel, xpu>::Launch(
s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
});
}
// Backward pass of split_v2: concatenates the per-output gradients back into
// the single input gradient along the split axis via ConcatenateKernel.
// Fixes vs. previous version: error-message typo ("mush match" -> "must
// match") and removal of the dead `leading` computation (never used).
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
      << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  // Normalize a negative axis.
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += input_grad.ndim();
  }
  CHECK_LT(real_axis, input_grad.ndim());
  // mid: length of the split axis; trailing: product of dims after it.
  size_t mid = input_grad.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  // Explicit indices lack the terminal boundary; append the axis length so
  // indices[k + 1] is always valid in the kernel.
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    // Collect device pointers of all incoming gradient buffers.
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    // Workspace layout: [boundary indices][gradient pointer table].
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    // One thread per input-gradient element gathers from its section buffer.
    Kernel<ConcatenateKernel, xpu>::Launch(
        s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Number of outputs produced by split: either the requested section count,
// or one per explicit boundary index.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
template<>
struct hash<mxnet::op::TransposeParam> {
  // Hash over the transpose axes.
  // Fix: the call operator must be const to satisfy the C++ Hash named
  // requirement (std::unordered_* invoke the hasher through a const object).
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  // Hash over all reshape parameters that affect the result.
  // Fix: the call operator must be const to satisfy the C++ Hash named
  // requirement (std::unordered_* invoke the hasher through a const object).
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
} // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
GB_binop__bshift_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint8)
// C=scalar+B GB (_bind1st__bshift_uint8)
// C=scalar+B' GB (_bind1st_tran__bshift_uint8)
// C=A+scalar GB (_bind2nd__bshift_uint8)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, no accumulator.
// The loop body lives in the included template; this wrapper binds the
// bshift/uint8 operator macros defined at the top of this generated file.
void GB (_Cdense_ewise3_noaccum__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE); the caller then falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB (_Cdense_accumb__bshift_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(generated code): unreachable -- the return above always fires first.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B for the
// bshift/uint8 operator. The numeric work is in GB_add_template.c; this
// wrapper declares slicing workspace and, for eWiseUnion, unpacks the
// alpha/beta fill scalars (uint8_t value operand, int8_t shift operand).
GrB_Info GB (_AaddB__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// Fill values used where only one of A/B has an entry.
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse; the work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 1 for bshift (non-commutative, no
// flipped builtin), flipxy selects between fmult(x,y) and fmult(y,x).
GrB_Info GB (_AemultB_02__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; the work is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult (bitmap method): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C
// is bitmap; the work is in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bshift_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the bshift/uint8 operator with the scalar x bound
// as the first argument, over all entries present in B (Bb bitmap honored).
GrB_Info GB (_bind1st__bshift_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Skip positions absent from the bitmap.
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint8 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the bshift/uint8 operator with the scalar y bound
// as the second argument, over all entries present in A (Ab bitmap honored).
GrB_Info GB (_bind2nd__bshift_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Skip positions absent from the bitmap.
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint8 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// first. A supplies the SECOND operand (the int8_t shift count), so GB_ATYPE
// is temporarily redefined for the included template and restored afterward.
GrB_Info GB (_bind1st_tran__bshift_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the remainder of this generated file.
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound
// second; here A already supplies the first (uint8_t) operand, so no
// GB_ATYPE redefinition is needed.
GrB_Info GB (_bind2nd_tran__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__div_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fp32)
// A*D function (colscale): GB (_AxD__div_fp32)
// D*A function (rowscale): GB (_DxB__div_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fp32)
// C=scalar+B GB (_bind1st__div_fp32)
// C=scalar+B' GB (_bind1st_tran__div_fp32)
// C=A+scalar GB (_bind2nd__div_fp32)
// C=A'+scalar GB (_bind2nd_tran__div_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij / bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x / y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FP32 || GxB_NO_DIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// The loop itself lives in the included template, specialized via the GB_*
// macros defined above (GB_BINOP is z = x / y in this file).
void GB (_Cdense_ewise3_accum__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Same as the accum variant above, but C is overwritten rather than
// accumulated into; the loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when the operator is disabled at compile time
// (GB_DISABLE); otherwise runs the subassign-23 template and returns
// GrB_SUCCESS.  B is sliced into B_ntasks tasks via B_ek_slicing.
GrB_Info GB (_Cdense_accumB__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Apply the operator between every entry of the dense matrix C and the
// scalar b, via the subassign-22 template (the exact update is defined by
// GB_BINOP, z = x / y in this file).  Returns GrB_NO_VALUE when the
// operator is disabled at compile time.
GrB_Info GB (_Cdense_accumb__div_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, typecast to float
    const int nthreads          // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // the duplicate return that followed the block above was unreachable
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each column j of A by the diagonal entry D(j,j), writing float
// results into C->x; the loop lives in the colscale template.
GrB_Info GB (_AxD__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each row i of B by the diagonal entry D(i,i), writing float
// results into C->x; the loop lives in the rowscale template.
GrB_Info GB (_DxB__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union element-wise combine of A and B with the DIV operator on float
// entries; the numeric work is done by GB_add_template.c using the task
// list computed by the symbolic phase.
GrB_Info GB (_AaddB__div_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are the eWiseUnion "fill" scalars; they remain uninitialized
// otherwise -- presumably the template reads them only when is_eWiseUnion
// is true (NOTE(review): confirm in GB_add_template.c)
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection element-wise combine (method 08); the numeric work is
// done by GB_emult_08_meta.c using the precomputed task list.
GrB_Info GB (_AemultB_08__div_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// For this file GB_BINOP_FLIP is 0 (defined above), so only the #else
// branch below is compiled: DIV's flipped form is handled elsewhere by
// rewriting z=div(y,x) as z=rdiv(x,y).
GrB_Info GB (_AemultB_02__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04: iterate over the sparse mask M, probing the bitmap/full
// matrices A and B; the loop lives in the included template.
GrB_Info GB (_AemultB_04__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output variant of eWiseMult; the loop lives in the included
// template and is selected by ewise_method.
GrB_Info GB (_AemultB_bitmap__div_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = x / Bx [p] in parallel for every entry present in the
// bitmap Bb (all entries when Bb is NULL).  Returns GrB_NO_VALUE when this
// operator is disabled at compile time.
GrB_Info GB (_bind1st__div_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the scalar x, typecast to float
    const GB_void *Bx_input,    // the values of B
    const int8_t *restrict Bb,  // bitmap of B, or NULL
    int64_t bnz,                // number of entries to process
    int nthreads                // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float b = GBX (Bx, p, false) ;
        Cx [p] = (x / b) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = Ax [p] / y in parallel for every entry present in the
// bitmap Ab (all entries when Ab is NULL).  Returns GrB_NO_VALUE when this
// operator is disabled at compile time.
GrB_Info GB (_bind2nd__div_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // the values of A
    const GB_void *y_input,     // the scalar y, typecast to float
    const int8_t *restrict Ab,  // bitmap of A, or NULL
    int64_t anz,                // number of entries to process
    int nthreads                // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float a = GBX (Ax, p, false) ;
        Cx [p] = (a / y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: for each entry it reads
// aij = A(i,j) and writes C(j,i) = x / aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x / aij) ; \
}
// Bind the scalar x as the first operand and apply the operator to the
// transpose of A.  Returns GrB_NO_VALUE if the operator is disabled.
GrB_Info GB (_bind1st_tran__div_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file (a no-op for fp32, since the
// x and a types coincide)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: for each entry it reads
// aij = A(i,j) and writes C(j,i) = aij / y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / y) ; \
}
// Bind the scalar y as the second operand and apply the operator to the
// transpose of A.  Returns GrB_NO_VALUE if the operator is disabled.
GrB_Info GB (_bind2nd_tran__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% John Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
  /* unique id assigned by AcquireWandId() */
size_t
id;
  /* "WandView-<id>" plus an optional description string */
char
name[MaxTextExtent],
*description;
  /* the image region this view covers */
RectangleInfo
extent;
  /* the wand whose image is viewed (not owned by the view) */
MagickWand
*wand;
  /* cache view used to read/write the image's pixels */
CacheView
*view;
  /* worker-thread count; also the first dimension of pixel_wands */
size_t
number_threads;
  /* one row of pixel wands per thread, one wand per column of the extent */
PixelWand
***pixel_wands;
  /* exception sink for errors raised while iterating the view */
ExceptionInfo
*exception;
  /* when MagickTrue, log wand events */
MagickBooleanType
debug;
  /* WandSignature while valid; inverted by DestroyWandView() */
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  /*
    Bug fix: the clone must reference the same wand as the original; the
    member was previously left NULL by the zero-fill above.
  */
  clone_view->wand=wand_view->wand;
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Bug fix: allocate the per-thread pointer table before cloning each row
    of pixel wands; the loop previously wrote through a NULL pixel_wands
    pointer.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    wand_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Release every per-thread row of pixel wands, then the pointer table
  itself; always returns NULL for convenient assignment by the caller.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    t;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (t=0; t < (ssize_t) number_threads; t++)
  {
    if (pixel_wands[t] == (PixelWand **) NULL)
      continue;
    pixel_wands[t]=DestroyPixelWands(pixel_wands[t],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  /*
    Release all resources owned by the view and invalidate its signature;
    always returns NULL.  The referenced wand is NOT destroyed (the view
    does not own it).
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  /*
    Bug fix: the description string (allocated with ConstantString) was
    previously leaked.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=(char *) RelinquishMagickMemory(
      wand_view->description);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
void *context)
{
ExceptionInfo
*exception;
Image
*destination_image,
*duplex_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/* NOTE(review): duplex and destination are dereferenced below without the
   NULL/signature asserts applied to source -- confirm callers guarantee
   they are valid views. */
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (DuplexTransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
duplex_image=duplex->wand->images;
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
/* One scanline per loop iteration; each thread fills its own row of pixel
   wands, invokes the user callback, then syncs the destination pixels. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*restrict duplex_indexes,
*restrict indexes;
register const PixelPacket
*restrict duplex_pixels,
*restrict pixels;
register IndexPacket
*restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*restrict destination_pixels;
if (status == MagickFalse)
continue;
/* Load the source scanline into this thread's pixel wands. */
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
/* Load the duplex scanline the same way. */
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
if (duplex_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetBlackQuantum(duplex->pixel_wands[id][x],
GetPixelBlack(duplex_indexes+x));
if (duplex_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) duplex->extent.width; x++)
PixelSetIndex(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
/* Acquire a writable destination scanline. */
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelBlack(destination_indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
/* Invoke the user callback, then copy its results back to the pixels. */
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->extent.width; x++)
SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
/* NOTE(review): the sync that failed is on destination->view, yet the
   exception is taken from source->view -- looks like a copy/paste slip;
   confirm against upstream before changing. */
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const PixelWand *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /*
    Compose "<reason> (<description>)" from the view's exception; the
    caller owns (and must free) the returned string.
  */
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MaxTextExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  /*
    Return a copy of the region this view covers.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
GetWandViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
/* One scanline per iteration; each thread loads its row of pixel wands
   and invokes the read-only user callback.  NOTE(review): the loop bound
   uses extent.height without adding extent.y -- matches the duplex
   iterator above, but confirm against upstream. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*indexes;
register const PixelPacket
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
/* Copy the scanline into this thread's pixel wands. */
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
/* Invoke the user callback; any pixel updates it makes are discarded. */
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_GetWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /*
    Return the calling thread's private row of pixel wands.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[GetOpenMPThreadId()]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  MagickWand
    *wand;

  /*
    Return the magick wand this view was constructed from.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand=wand_view->wand;
  return(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  /*
    A valid wand view is non-NULL, carries the wand signature, and has a
    name that begins with the WandViewId prefix.
  */
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    n;

  /*
    Allocate one row of pixel wands per worker thread.  The array is zeroed
    first so a partially constructed set can be torn down safely by
    DestroyPixelsThreadSet() on failure.
  */
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
/*
  Allocate and initialize a wand view spanning the full canvas of the wand's
  current image, with one row of pixel wands per OpenMP thread.
  Throws a fatal wand exception on allocation failure.
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *wand_view;
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  /* zero the structure so every pointer member starts out NULL */
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* the wand must be assigned BEFORE the cache view is acquired from it */
  wand_view->wand=wand;
  wand_view->view=AcquireCacheView(wand_view->wand->images);
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  /* one pixel-wand row per thread, each as wide as the view */
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  wand_view->exception=AcquireExceptionInfo();
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/*
  Allocate and initialize a wand view restricted to the given extent of the
  wand's current image.  Throws a fatal wand exception on allocation failure.

  Fix: the wand member is now assigned BEFORE AcquireCacheView() is called.
  Previously the cache view was acquired via wand_view->wand->images while
  wand_view->wand was still NULL (the structure had just been zeroed by
  ResetMagickMemory), dereferencing a NULL pointer.  NewWandView() already
  uses the correct order.
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* assign the wand first; the cache view is acquired from it below */
  wand_view->wand=wand;
  wand_view->view=AcquireCacheView(wand_view->wand->images);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->exception=AcquireExceptionInfo();
  /* one pixel-wand row per thread, each as wide as the view */
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/*
  Associate a description with the wand view.

  Fix: release any previously installed description first.  NewWandView()
  installs a ConstantString("WandView") description, so blindly overwriting
  it leaked that allocation on every call.
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *destination_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* pixels will be written directly, so the image must be DirectClass */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(destination->number_threads)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickBooleanType
      sync;
    register IndexPacket
      *restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict pixels;
    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* the callback fills this thread's pixel wands for scanline y */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wand values back into the authentic pixel buffer */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* progress is shared across threads; serialize its update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetWandViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewThreads method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
/*
  Set the number of threads in the view's thread team, clamped to the
  OpenMP maximum.

  Fix: assert WandSignature rather than MagickSignature -- every other
  function in this WandView API validates against WandSignature.
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > GetOpenMPMaximumThreads())
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate over two wand views in parallel, calling the transfer callback for
  each scanline.  Source pixels are read virtually; destination pixels are
  written authentically and synced.

  Fixes:
    - The destination pixel wands are now pre-loaded from the DESTINATION
      scanline (destination_pixels/destination_indexes).  Previously they
      were loaded from the source buffers (pixels/indexes), which reads the
      wrong data and over-reads when the destination extent is wider than
      the source extent.
    - After a failed destination sync, the inherited exception now comes
      from destination->view, not source->view (matching
      SetWandViewIterator).
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *destination_image,
    *source_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickBooleanType
      sync;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict pixels;
    register IndexPacket
      *restrict destination_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict destination_pixels;
    if (status == MagickFalse)
      continue;
    /* load the source scanline into this thread's source pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* load the destination scanline into the destination pixel wands */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* write the (possibly modified) destination wands back to the pixels */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  ExceptionInfo
    *exception;
  Image
    *source_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* pixels will be modified in place, so the image must be DirectClass */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register IndexPacket
      *restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict pixels;
    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* load the scanline into this thread's pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    /* the callback updates the wands; changes are synced back below */
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* progress is shared across threads; serialize its update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
pattern_generator.impl.h | /**
* Copyright (c) 2018, Daniel Thuerck, TU Darmstadt - GCC. All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD 3-clause license. See the LICENSE file for details.
*/
#include <libs/staging/pattern_generator.h>
#include <libs/staging/elimination_tree.h>
#include <set>
#include <cstdio>
#include <iostream>
#include <queue>
NS_CULIP_BEGIN
NS_STAGING_BEGIN
/**
* *****************************************************************************
* **************************** PatternGenerator *******************************
* *****************************************************************************
*/
/* Default-construct the pattern generator base; there is no state to set. */
template<typename T>
PatternGenerator<T>::
PatternGenerator()
{
}
/* ************************************************************************** */
/* Trivial destructor; derived classes release their own resources. */
template<typename T>
PatternGenerator<T>::
~PatternGenerator()
{
}
/**
* *****************************************************************************
* *************************** ZeroFillInPattern *******************************
* *****************************************************************************
*/
/* Construct a zero fill-in pattern generator (no state beyond the base). */
template<typename T>
ZeroFillInPattern<T>::
ZeroFillInPattern()
: PatternGenerator<T>()
{
}
/* ************************************************************************** */
/* Trivial destructor; smart-pointer members clean up automatically. */
template<typename T>
ZeroFillInPattern<T>::
~ZeroFillInPattern()
{
}
/* ************************************************************************** */
/*
 * Build a lower-triangular pattern that allows NO fill-in: each row keeps
 * only A's own entries left of its pivot block plus the diagonal.
 * Returns a Triangular layout with rows/cols filled and nnz reserved.
 */
template<typename T>
Triangular_ptr<T>
ZeroFillInPattern<T>::
compute_pattern(
    const csr_matrix_t<T> * A,
    const mat_int_t num_piv_starts,
    const mat_int_t * piv_starts)
{
    /* save data */
    this->m_A = A;
    this->m_piv_starts.assign(piv_starts, piv_starts + num_piv_starts);
    /* remove subdiagonal entries in block pivots: sub_m[j] is the first
       row of the pivot block that row j belongs to */
    std::vector<mat_int_t> sub_m(this->m_A->m);
    for(mat_int_t i = 0; i < this->m_piv_starts.size(); ++i)
    {
        const mat_int_t piv_start = this->m_piv_starts[i];
        const mat_int_t piv_end = (i < this->m_piv_starts.size() - 1 ?
            this->m_piv_starts[i + 1] : this->m_A->m);
        for(mat_int_t j = piv_start; j < piv_end; ++j)
            sub_m[j] = piv_start;
    }
    /* determine row sizes of lower triangular */
    std::vector<mat_int_t> row_sizes(this->m_A->m, 0);
    mat_int_t nnz = 0;
    for(mat_int_t i = 0; i < this->m_A->m; ++i)
    {
        const mat_int_t i_len = this->m_A->csr_row[i + 1] -
            this->m_A->csr_row[i];
        const mat_int_t * i_col = this->m_A->csr_col + this->m_A->csr_row[i];
        /* count A's entries strictly left of the pivot block; assumes
           column indices in each CSR row are sorted ascending -- TODO
           confirm against the matrix builder */
        for(mat_int_t j = 0; j < i_len && i_col[j] < sub_m[i]; ++j)
            ++row_sizes[i];
        /* plus one for the diagonal */
        ++row_sizes[i];
        nnz += row_sizes[i];
    }
    /* allocate output layout and copy data */
    Triangular_ptr<T> L = Triangular_ptr<T>(
        new Triangular<T>(this->m_A->m, nnz));
    mat_int_t * L_csr_row = L->raw_row_ptr();
    mat_int_t * L_csr_col = L->raw_col_ptr();
    mat_int_t offset = 0;
    L_csr_row[0] = 0;
    for(mat_int_t i = 0; i < this->m_A->m; ++i)
    {
        const mat_int_t * i_col = this->m_A->csr_col + this->m_A->csr_row[i];
        /* copy the row_sizes[i]-1 off-diagonal columns counted above */
        for(mat_int_t j = 0; j < row_sizes[i] - 1; ++j)
            L_csr_col[offset++] = i_col[j];
        /* diagonal entry */
        L_csr_col[offset++] = i;
        L_csr_row[i + 1] = offset;
    }
    return L;
}
/* ************************************************************************** */
/*
 * Prepare incremental pivoting: wrap A in a pivotable triangular view and
 * initially treat every row as the start of a 1x1 pivot.
 */
template<typename T>
void
ZeroFillInPattern<T>::
init_pivot_pattern(
    const csr_matrix_t<T> * A)
{
    m_pL = FlexibleTriangular_ptr<T>(new FlexibleTriangular<T>(A));
    m_is_piv.assign(A->m, 1);
}
/* ************************************************************************** */
/*
 * Apply a 1x1 pivot: forward the swap to the wrapped triangular view and
 * mark cur_row as a pivot start.
 */
template<typename T>
void
ZeroFillInPattern<T>::
pivot_1x1(
    const mat_int_t cur_row,
    const mat_int_t piv_a)
{
    m_pL->pivot(cur_row, piv_a);
    m_is_piv[cur_row] = 1;
}
/* ************************************************************************** */
/*
 * Apply a 2x2 pivot: swap both rows in the wrapped view; cur_row is the
 * block start (m_is_piv = 1), cur_row + 1 its partner (m_is_piv = 0).
 */
template<typename T>
void
ZeroFillInPattern<T>::
pivot_2x2(
    const mat_int_t cur_row,
    const mat_int_t piv_a,
    const mat_int_t piv_b)
{
    m_pL->pivot(cur_row, piv_a);
    m_pL->pivot(cur_row + 1, piv_b);
    m_is_piv[cur_row] = 1;
    m_is_piv[cur_row + 1] = 0;
}
/* ************************************************************************** */
/*
 * Copy the row's structural pattern into buf_ix and return its length.
 * For the second row of a 2x2 pivot (m_is_piv[row] == 0) the subdiagonal
 * entry (row - 1) is excluded.
 */
template<typename T>
mat_int_t
ZeroFillInPattern<T>::
row_pattern(
    const mat_int_t row,
    mat_int_t * buf_ix)
{
    mat_int_t * pL_ix;
    T * pL_val;
    const mat_int_t pL_len = m_pL->row(row, pL_ix, pL_val);
    mat_int_t len = 0;
    for(mat_int_t i = 0; i < pL_len; ++i)
    {
        /* skip the subdiagonal entry inside a 2x2 pivot block */
        const bool skip = (!m_is_piv[row] && pL_ix[i] == row - 1);
        if(skip)
            continue;
        buf_ix[len++] = pL_ix[i];
    }
    return len;
}
/**
* *****************************************************************************
* ****************************** ExactPattern *********************************
* *****************************************************************************
*/
/* Construct an exact-pattern generator (no state beyond the base). */
template<typename T>
ExactPattern<T>::
ExactPattern()
: PatternGenerator<T>()
{
}
/* ************************************************************************** */
/* Trivial destructor; the elimination tree is released by its smart pointer. */
template<typename T>
ExactPattern<T>::
~ExactPattern()
{
}
/* ************************************************************************** */
/*
 * Compute the exact (no-dropping) symbolic pattern by building an
 * elimination tree over A with the given pivot block starts and extracting
 * its pattern.
 */
template<typename T>
Triangular_ptr<T>
ExactPattern<T>::
compute_pattern(
    const csr_matrix_t<T> * A,
    const mat_int_t num_piv_starts,
    const mat_int_t * piv_starts)
{
    /* keep a reference to the input and the pivot starts */
    this->m_A = A;
    this->m_piv_starts.assign(piv_starts, piv_starts + num_piv_starts);
    m_etree = EliminationTree_ptr<T>(new EliminationTree<T>(this->m_A->m,
        this->m_piv_starts.size(), this->m_piv_starts.data()));
    return m_etree->extract_pattern(this->m_A);
}
/* ************************************************************************** */
/*
 * Prepare incremental pivoting: build a fresh elimination tree over A and
 * let it track the pivot pattern.
 */
template<typename T>
void
ExactPattern<T>::
init_pivot_pattern(
    const csr_matrix_t<T> * A)
{
    this->m_A = A;
    m_etree = EliminationTree_ptr<T>(new EliminationTree<T>(this->m_A->m));
    m_etree->init_pivot_pattern(A);
}
/* ************************************************************************** */
/* Forward a 1x1 pivot to the elimination tree. */
template<typename T>
void
ExactPattern<T>::
pivot_1x1(
    const mat_int_t cur_row,
    const mat_int_t piv_a)
{
    m_etree->pivot_1x1(cur_row, piv_a);
}
/* ************************************************************************** */
/* Forward a 2x2 pivot to the elimination tree. */
template<typename T>
void
ExactPattern<T>::
pivot_2x2(
    const mat_int_t cur_row,
    const mat_int_t piv_a,
    const mat_int_t piv_b)
{
    m_etree->pivot_2x2(cur_row, piv_a, piv_b);
}
/* ************************************************************************** */
/* Delegate the per-row pattern query to the elimination tree. */
template<typename T>
mat_int_t
ExactPattern<T>::
row_pattern(
    const mat_int_t row,
    mat_int_t * buf_ix)
{
    return m_etree->row_pattern(row, buf_ix);
}
/**
* *****************************************************************************
* ******************************* LevelPattern ********************************
* *****************************************************************************
*/
/*
 * Construct a level-of-fill pattern generator; negative levels are clamped
 * to 0 (no fill-in beyond A's own structure).
 *
 * Fix: list the base class before the member in the initializer list.
 * Bases are always initialized before members regardless of listing order,
 * so the previous order was misleading and triggered -Wreorder.
 */
template<typename T>
LevelPattern<T>::
LevelPattern(
    const mat_int_t level)
: PatternGenerator<T>(),
  m_level(std::max(level, 0))
{
}
/* ************************************************************************** */
/* Trivial destructor; smart-pointer members clean up automatically. */
template<typename T>
LevelPattern<T>::
~LevelPattern<T>()
{
}
/* ************************************************************************** */
/*
 * Compute a level-of-fill pattern for A: per row, a BFS over fill paths
 * (bounded by m_level) yields the row's column indices.  Two parallel
 * passes are used -- lengths first, then an exclusive prefix sum for the
 * row offsets, then the actual indices.
 */
template<typename T>
Triangular_ptr<T>
LevelPattern<T>::
compute_pattern(
    const csr_matrix_t<T> * A,
    const mat_int_t num_piv_starts,
    const mat_int_t * piv_starts)
{
    /* save data */
    this->m_A = A;
    m_pL = FlexibleTriangular_ptr<T>(new FlexibleTriangular<T>(A));
    /* save pivots */
    m_is_piv.resize(A->m);
    std::fill(m_is_piv.begin(), m_is_piv.end(), 0);
    for(mat_int_t i = 0; i < num_piv_starts; ++i)
        m_is_piv[piv_starts[i]] = 1;
    /* use BFS to find fill-paths (levels after sum rule) */
    std::vector<mat_int_t> L_csr_col;
    std::vector<mat_int_t> lvl_lens(A->m + 1);
    /* pass 1: per-row pattern lengths; rows are independent, so this is
       safe to parallelize (row i's pattern has at most i + 1 entries) */
#pragma omp parallel for
    for(mat_int_t i = 0; i < A->m; ++i)
    {
        std::vector<mat_int_t> buf(i + 1);
        lvl_lens[i] = row_pattern(i, buf.data());
    }
    lvl_lens[A->m] = 0;
    /* compute offsets: in-place exclusive prefix sum over the lengths */
    mat_int_t hold = lvl_lens[0];
    lvl_lens[0] = 0;
    for(mat_int_t i = 1; i < A->m + 1; ++i)
    {
        const mat_int_t res = lvl_lens[i - 1] + hold;
        hold = lvl_lens[i];
        lvl_lens[i] = res;
    }
    const mat_int_t nnz = lvl_lens[A->m];
    L_csr_col.resize(nnz);
    /* pass 2: recompute each row's pattern directly at its offset */
#pragma omp parallel for
    for(mat_int_t i = 0; i < A->m; ++i)
    {
        row_pattern(i, L_csr_col.data() + lvl_lens[i]);
    }
    /* import pattern into triangular matrix */
    Triangular_ptr<T> L = Triangular_ptr<T>(new Triangular<T>(A->m, nnz));
    std::copy(lvl_lens.begin(), lvl_lens.end(), L->raw_row_ptr());
    std::copy(L_csr_col.begin(), L_csr_col.end(), L->raw_col_ptr());
    std::fill(L->raw_val_ptr(), L->raw_val_ptr() + nnz, 1.0);
    return L;
}
/* ************************************************************************** */
/*
 * Prepare incremental pivoting: wrap A in a pivotable triangular view and
 * initialize every row as a 1x1 pivot.
 */
template<typename T>
void
LevelPattern<T>::
init_pivot_pattern(
    const csr_matrix_t<T> * A)
{
    this->m_A = A;
    m_pL = FlexibleTriangular_ptr<T>(new FlexibleTriangular<T>(A));
    /* assign() replaces the contents with A->m ones, matching the
       original resize-then-fill sequence */
    m_is_piv.assign(A->m, 1);
}
/* ************************************************************************** */
/* Apply a 1x1 pivot: swap in the wrapped matrix, mark cur_row as pivot. */
template<typename T>
void
LevelPattern<T>::
pivot_1x1(
    const mat_int_t cur_row,
    const mat_int_t piv_a)
{
    /* pivot source matrix */
    m_pL->pivot(cur_row, piv_a);
    /* mark cur_row as 1x1 pivot */
    m_is_piv[cur_row] = 1;
}
/* ************************************************************************** */
/*
 * Apply a 2x2 pivot: swap both rows; cur_row is the block start
 * (m_is_piv = 1), cur_row + 1 its partner (m_is_piv = 0).
 */
template<typename T>
void
LevelPattern<T>::
pivot_2x2(
    const mat_int_t cur_row,
    const mat_int_t piv_a,
    const mat_int_t piv_b)
{
    /* pivot source matrix */
    m_pL->pivot(cur_row, piv_a);
    m_pL->pivot(cur_row + 1, piv_b);
    /* mark cur_row as 2x2 pivot */
    m_is_piv[cur_row] = 1;
    m_is_piv[cur_row + 1] = 0;
}
/* ************************************************************************** */
/*
 * Compute the level-limited pattern of one row via BFS over fill paths and
 * write the sorted column indices into buf_ix; returns the count.
 * buf_ix must have room for row + 1 entries.
 */
template<typename T>
mat_int_t
LevelPattern<T>::
row_pattern(
    const mat_int_t row,
    mat_int_t * buf_ix)
{
    /**
     * Note: in the factorization, we solve L_11 (D_11 x)), hence the level
     * fill computes L's fill-in first and then cares for D, i.e. 2x2 pivots
     */
    /* columns >= sub_m belong to this row's own pivot block */
    const mat_int_t sub_m = (m_is_piv[row] == 0 ? (row - 1) : row);
    mat_int_t nz_len = 0;
    /* level = BFS depth; lpred = predecessor bound on the fill path;
       both default to m (unreached) */
    std::vector<mat_int_t> level(m_pL->m(), m_pL->m());
    std::vector<mat_int_t> lpred(m_pL->m(), m_pL->m());
    std::vector<bool> added(m_pL->m(), false);
    std::queue<mat_int_t> bfs;
    mat_int_t * cur_ix;
    T * cur_val;
    mat_int_t cur_len;
    /* the diagonal entry is always part of the pattern */
    lpred[row] = -1;
    level[row] = -1;
    buf_ix[nz_len++] = row;
    added[row] = true;
    /* seed the BFS with the row's structural nonzeros left of the block */
    cur_len = m_pL->row(row, cur_ix, cur_val);
    for(mat_int_t j = 0; j < cur_len; ++j)
    {
        if(cur_ix[j] < sub_m)
        {
            bfs.push(cur_ix[j]);
            level[cur_ix[j]] = 0;
            lpred[cur_ix[j]] = cur_ix[j];
        }
    }
    /* expand one node: visit row and column neighbors while the level
       budget (m_level) allows and the new predecessor bound improves */
    auto level_step = [&](const mat_int_t cur, const mat_int_t next_pred,
        const mat_int_t next_level)
    {
        cur_len = m_pL->row(cur, cur_ix, cur_val);
        for(mat_int_t j = 0; j < cur_len; ++j)
        {
            if(cur_ix[j] < cur && next_level <= m_level &&
                next_pred < lpred[cur_ix[j]])
            {
                bfs.push(cur_ix[j]);
                level[cur_ix[j]] = next_level;
                lpred[cur_ix[j]] = next_pred;
            }
        }
        cur_len = m_pL->col(cur, cur_ix);
        for(mat_int_t j = 0; j < cur_len; ++j)
        {
            if(cur_ix[j] < sub_m && next_level <= m_level &&
                next_pred < lpred[cur_ix[j]])
            {
                bfs.push(cur_ix[j]);
                level[cur_ix[j]] = next_level;
                lpred[cur_ix[j]] = next_pred;
            }
        }
    };
    while(!bfs.empty())
    {
        const mat_int_t cur = bfs.front();
        bfs.pop();
        /* a 2x2 pivot couples cur with its block partner */
        mat_int_t partner2x2 = -1;
        if(cur < m_pL->m() - 1 && m_is_piv[cur] && !m_is_piv[cur + 1])
            partner2x2 = cur + 1;
        if(cur > 0 && !m_is_piv[cur] && m_is_piv[cur - 1])
            partner2x2 = cur - 1;
        /* cur enters the pattern when it is not dominated by a larger
           predecessor on its fill path (and so does its 2x2 partner) */
        if(cur >= lpred[cur])
        {
            if(!added[cur])
            {
                buf_ix[nz_len++] = cur;
                added[cur] = true;
            }
            if(partner2x2 != -1 && !added[partner2x2])
            {
                buf_ix[nz_len++] = partner2x2;
                added[partner2x2] = true;
            }
        }
        const mat_int_t next_pred = std::max(cur, lpred[cur]);
        const mat_int_t next_level = level[cur] + 1;
        level_step(cur, next_pred, next_level);
        if(partner2x2 != -1)
            level_step(partner2x2, next_pred, next_level);
    }
    /* indices were emitted in BFS order; callers expect them sorted */
    std::sort(buf_ix, buf_ix + nz_len);
    return nz_len;
}
/**
* *****************************************************************************
* ************************** BlockRestrictedPattern ***************************
* *****************************************************************************
*/
/**
 * Construct a block-restricted pattern generator.
 *
 * @param m            dimension of the (fine) system matrix
 * @param coarse       coarse (block-level) matrix whose sparsity pattern
 *                     restricts which blocks may receive fill-in
 * @param num_blocks   number of blocks
 * @param block_starts first fine index of each block (length num_blocks)
 *
 * Builds the fine-index -> block-index map eagerly (create_block_map).
 *
 * Fix: the base class is listed first in the initializer list so the
 * written order matches the actual initialization order (bases are always
 * initialized before members); the previous ordering triggered -Wreorder.
 */
template<typename T>
BlockRestrictedPattern<T>::
BlockRestrictedPattern(
const mat_int_t m,
const csr_matrix_t<T> * coarse,
const mat_int_t num_blocks,
const mat_int_t * block_starts)
: PatternGenerator<T>(),
m_m(m),
m_coarse(coarse),
m_num_blocks(num_blocks),
m_block_starts(block_starts)
{
create_block_map();
}
/* ************************************************************************** */
/**
 * Destructor. All owned resources are held in smart pointers / standard
 * containers, so there is nothing to release explicitly.
 */
template<typename T>
BlockRestrictedPattern<T>::
~BlockRestrictedPattern()
{
}
/* ************************************************************************** */
/**
 * Compute the block-restricted factorization pattern of A.
 *
 * Wraps A in a flexible triangular working copy (m_pL), records the pivot
 * start rows, and builds a block diagonal matrix (m_pD) from them. Each
 * row's pattern is then generated via row_pattern() and assembled into a
 * Triangular matrix whose values are all set to 1 (pattern only).
 *
 * @param A              input matrix (CSR)
 * @param num_piv_starts number of pivot start indices
 * @param piv_starts     row indices at which a pivot block starts
 * @return triangular matrix carrying the computed sparsity pattern
 */
template<typename T>
Triangular_ptr<T>
BlockRestrictedPattern<T>::
compute_pattern(
const csr_matrix_t<T> * A,
const mat_int_t num_piv_starts,
const mat_int_t * piv_starts)
{
/* save data */
this->m_A = A;
m_pL = FlexibleTriangular_ptr<T>(new FlexibleTriangular<T>(A));
/* save pivots: m_is_piv[i] == 1 marks a pivot start row */
m_is_piv.resize(A->m);
std::fill(m_is_piv.begin(), m_is_piv.end(), 0);
for(mat_int_t i = 0; i < num_piv_starts; ++i)
m_is_piv[piv_starts[i]] = 1;
/* create block diagonal matrix */
m_pD = BlockDiagonal_ptr<T>(new BlockDiagonal<T>(A->m,
num_piv_starts, piv_starts));
/* use BFS to find fill-paths (levels after sum rule) */
/* buf holds one row's indices; a lower-triangular row has <= A->m entries */
std::vector<mat_int_t> buf(A->m);
m_row_starts.resize(A->m + 1);
m_row_ix.clear();
m_row_ix.reserve(A->nnz);
m_row_starts[0] = 0;
for(mat_int_t i = 0; i < A->m; ++i)
{
const mat_int_t i_len = row_pattern(i, buf.data());
m_row_starts[i + 1] = m_row_starts[i] + i_len;
std::copy(buf.begin(), buf.begin() + i_len,
std::back_inserter(m_row_ix));
}
const mat_int_t nnz = m_row_starts.back();
/* import pattern into triangular matrix; values are a placeholder 1.0 */
Triangular_ptr<T> L = Triangular_ptr<T>(new Triangular<T>(A->m,
nnz));
std::copy(m_row_starts.begin(), m_row_starts.end(), L->raw_row_ptr());
std::copy(m_row_ix.begin(), m_row_ix.end(), L->raw_col_ptr());
std::fill(L->raw_val_ptr(), L->raw_val_ptr() + L->nnz(), (T) 1.0);
return L;
}
/* ************************************************************************** */
/**
 * Initialize pivoting state for A: a flexible triangular working copy and
 * a block diagonal matrix seeded with one 1x1 pivot block per row.
 *
 * @param A input matrix (CSR)
 */
template<typename T>
void
BlockRestrictedPattern<T>::
init_pivot_pattern(
const csr_matrix_t<T> * A)
{
this->m_A = A;
m_pL = FlexibleTriangular_ptr<T>(new FlexibleTriangular<T>(A));
m_pD = BlockDiagonal_ptr<T>(new BlockDiagonal<T>(A->m));
/* initialize with 1x1 pivots */
m_is_piv.resize(A->m);
std::fill(m_is_piv.begin(), m_is_piv.end(), 1);
for(mat_int_t i = 0; i < A->m; ++i)
{
Block_ptr<T> b = BlockFactory<T>::create_block(1, i);
m_pD->add_block(b.get());
}
}
/* ************************************************************************** */
/**
 * Apply a 1x1 pivot at cur_row.
 *
 * Permutes cur_row with piv_a in the working factor, marks cur_row as a
 * pivot start, and rebuilds the block diagonal: all existing blocks are
 * copied, a new 1x1 block for cur_row is appended, and all rows after
 * cur_row are padded with provisional 1x1 blocks.
 *
 * @param cur_row row receiving the pivot
 * @param piv_a   row/column swapped into cur_row
 */
template<typename T>
void
BlockRestrictedPattern<T>::
pivot_1x1(
const mat_int_t cur_row,
const mat_int_t piv_a)
{
/* pivot source matrix */
m_pL->pivot(cur_row, piv_a);
/* mark cur_row as 1x1 pivot */
m_is_piv[cur_row] = 1;
/* recreate block diagonal matrix */
/* NOTE(review): every existing block is deep-copied on each pivot call,
   i.e. O(#blocks) work per pivot */
std::vector<Block_ptr<T>> prev_blocks;
for(mat_int_t i = 0; i < m_pD->num_blocks(); ++i)
{
const Block_ptr<T>& i_block = m_pD->raw_blocks()[i];
prev_blocks.emplace_back(i_block->copy());
}
/* add current_pivot */
prev_blocks.emplace_back(BlockFactory<T>::create_block(1, cur_row));
/* add remaining blocks */
for(mat_int_t i = cur_row + 1; i < m_m; ++i)
prev_blocks.emplace_back(BlockFactory<T>::create_block(1, i));
m_pD = BlockDiagonal_ptr<T>(new BlockDiagonal<T>(m_m, prev_blocks.size(),
prev_blocks.data()));
}
/* ************************************************************************** */
/**
 * Apply a 2x2 pivot at (cur_row, cur_row + 1).
 *
 * Permutes both rows into place, marks cur_row as a pivot start and
 * cur_row + 1 as the second row of a 2x2 pivot (m_is_piv == 0), then
 * rebuilds the block diagonal: existing blocks are copied, one 2x2 block
 * is appended for cur_row, and the remaining rows get provisional 1x1
 * blocks.
 *
 * @param cur_row first row of the 2x2 pivot
 * @param piv_a   row swapped into cur_row
 * @param piv_b   row swapped into cur_row + 1
 */
template<typename T>
void
BlockRestrictedPattern<T>::
pivot_2x2(
const mat_int_t cur_row,
const mat_int_t piv_a,
const mat_int_t piv_b)
{
/* pivot source matrix */
m_pL->pivot(cur_row, piv_a);
m_pL->pivot(cur_row + 1, piv_b);
/* mark cur_row as 2x2 pivot */
m_is_piv[cur_row] = 1;
m_is_piv[cur_row + 1] = 0;
/* recreate block diagonal matrix */
/* NOTE(review): deep-copies every existing block, O(#blocks) per call */
std::vector<Block_ptr<T>> prev_blocks;
for(mat_int_t i = 0; i < m_pD->num_blocks(); ++i)
{
const Block_ptr<T>& i_block = m_pD->raw_blocks()[i];
prev_blocks.emplace_back(i_block->copy());
}
/* add current_pivot */
prev_blocks.emplace_back(BlockFactory<T>::create_block(2, cur_row));
/* add remaining blocks */
for(mat_int_t i = cur_row + 2; i < m_m; ++i)
prev_blocks.emplace_back(BlockFactory<T>::create_block(1, i));
m_pD = BlockDiagonal_ptr<T>(new BlockDiagonal<T>(m_m, prev_blocks.size(),
prev_blocks.data()));
}
/* ************************************************************************** */
/**
 * Generate the block-restricted pattern of factor row 'row'.
 *
 * The pattern is found by symbolic triangular solves against the current
 * working factor L and block diagonal D, then entries whose block does
 * not appear in the coarse pattern's row are dropped. The accepted
 * (sorted) pattern is written to buf_ix and stored back into L.
 *
 * @param row    row to generate
 * @param buf_ix output buffer receiving the sorted column indices
 * @return number of indices written to buf_ix
 */
template<typename T>
mat_int_t
BlockRestrictedPattern<T>::
row_pattern(
const mat_int_t row,
mat_int_t * buf_ix)
{
/* m_is_piv[row] == 0 marks the second row of a 2x2 pivot (see
   pivot_2x2); its solve is restricted to one column fewer */
const mat_int_t sub_m = (m_is_piv[row] == 0 ? (row - 1) : row);
/**
 * block restriction is similar to threshold dropping, hence there
 * is no easy fill-in strategy -> every row must be generated from
 * triangular solves
 */
/* extract A's pattern */
mat_int_t * A_ix;
T * A_val;
mat_int_t A_len = m_pL->row(row, A_ix, A_val);
/* solve with L first */
std::vector<mat_int_t> L_ix;
mat_int_t L_len = m_pL->sub_sanalysis(sub_m, A_len, A_ix);
L_ix.resize(L_len);
m_pL->sub_sanalysis_export(L_ix.data());
/* solve with D then */
std::vector<mat_int_t> LD_ix;
mat_int_t LD_len = m_pD->sub_sanalysis(sub_m, L_len, L_ix.data());
LD_ix.resize(LD_len);
m_pD->sub_sanalysis_export(LD_ix.data());
/* add diagonal element */
LD_ix.push_back(row);
/* remove all elements outside of blocks: flag the coarse row's block
   columns in a dense bitmap, then drop entries whose block is unset */
const mat_int_t coarse_row = m_block_map[row];
std::vector<bool> coarse_dense_row(m_coarse->m, false);
for(mat_int_t i = m_coarse->csr_row[coarse_row];
i < m_coarse->csr_row[coarse_row + 1]; ++i)
coarse_dense_row[m_coarse->csr_col[i]] = true;
LD_ix.erase(
std::remove_if(
LD_ix.begin(),
LD_ix.end(),
[&](const mat_int_t col)
{
const mat_int_t col_block = m_block_map[col];
return !coarse_dense_row[col_block];
}),
LD_ix.end());
std::sort(LD_ix.begin(), LD_ix.end());
/* copy to output */
std::copy(LD_ix.begin(), LD_ix.end(), buf_ix);
/* update L: store the accepted pattern back into the working factor
   (placeholder values 1.0) */
std::vector<T> vals(LD_ix.size(), 1.0);
m_pL->set_row(row, LD_ix.size(), LD_ix.data(), vals.data());
return LD_ix.size();
}
/* ************************************************************************** */
/**
 * Populate m_block_map: map every fine row/column index to the index of
 * the block that contains it. Block b covers the half-open fine-index
 * range [m_block_starts[b], m_block_starts[b + 1]), the last block
 * extending to m_m.
 */
template<typename T>
void
BlockRestrictedPattern<T>::
create_block_map()
{
    m_block_map.resize(m_m);
    for(mat_int_t b = 0; b < m_num_blocks; ++b)
    {
        const mat_int_t range_begin = m_block_starts[b];
        const mat_int_t range_end =
            (b + 1 < m_num_blocks) ? m_block_starts[b + 1] : m_m;
        std::fill(m_block_map.begin() + range_begin,
            m_block_map.begin() + range_end, b);
    }
}
/**
* *****************************************************************************
* ************************ BlockRestrictedExactPattern ************************
* *****************************************************************************
*/
/**
 * Construct an exact block-restricted pattern generator.
 *
 * @param m            dimension of the (fine) system matrix
 * @param coarse       coarse (block-level) matrix restricting fill-in
 * @param num_blocks   number of blocks
 * @param block_starts first fine index of each block (length num_blocks)
 *
 * Also builds m_rowcol_in_block, mapping each fine row/column index to
 * its containing block.
 *
 * Fix: the base class is listed first in the initializer list so the
 * written order matches the actual initialization order (bases are always
 * initialized before members); the previous ordering triggered -Wreorder.
 */
template<typename T>
BlockRestrictedExactPattern<T>::
BlockRestrictedExactPattern(
const mat_int_t m,
const csr_matrix_t<T> * coarse,
const mat_int_t num_blocks,
const mat_int_t * block_starts)
: PatternGenerator<T>(),
m_m(m),
m_coarse(coarse),
m_num_blocks(num_blocks),
m_block_starts(block_starts)
{
/* create a row-in-block map: block i covers [block_starts[i],
   block_starts[i + 1]), the last block extending to m */
m_rowcol_in_block.resize(m);
for(mat_int_t i = 0; i < num_blocks; ++i)
{
const mat_int_t i_from = block_starts[i];
const mat_int_t i_to = (i < num_blocks - 1) ? block_starts[i + 1] :
m;
for(mat_int_t j = i_from; j < i_to; ++j)
m_rowcol_in_block[j] = i;
}
}
/* ************************************************************************** */
/**
 * Destructor. All owned resources are held in smart pointers / standard
 * containers, so there is nothing to release explicitly.
 */
template<typename T>
BlockRestrictedExactPattern<T>::
~BlockRestrictedExactPattern()
{
}
/* ************************************************************************** */
/**
 * Compute the exact (elimination-tree based) pattern of A, restricted to
 * the coarse block structure.
 *
 * Two passes over the unfiltered pattern L are made:
 *   1) count, per row, how many entries survive block filtering
 *      (filter_row writes into a scratch buffer here), then convert the
 *      counts into CSR row offsets via an in-place exclusive prefix sum;
 *   2) re-run the filter, this time writing the surviving column indices
 *      directly into the filtered matrix.
 *
 * @param A              input matrix (CSR)
 * @param num_piv_starts number of pivot start indices
 * @param piv_starts     row indices at which a pivot block starts
 * @return triangular matrix holding the filtered pattern (values == 1)
 */
template<typename T>
Triangular_ptr<T>
BlockRestrictedExactPattern<T>::
compute_pattern(
const csr_matrix_t<T> * A,
const mat_int_t num_piv_starts,
const mat_int_t * piv_starts)
{
/* save data */
this->m_A = A;
this->m_piv_starts.assign(piv_starts, piv_starts + num_piv_starts);
m_etree = EliminationTree_ptr<T>(new EliminationTree<T>(this->m_A->m,
this->m_piv_starts.size(), this->m_piv_starts.data()));
Triangular_ptr<T> L = m_etree->extract_pattern(this->m_A);
/* filter entries - remove if they are not in any block */
std::vector<mat_int_t> bigbuf(L->nnz());
std::vector<mat_int_t> filtered_sizes(m_m + 1);
for(mat_int_t i = 0; i < m_num_blocks; ++i)
{
const mat_int_t i_from = m_block_starts[i];
const mat_int_t i_to = (i < m_num_blocks - 1) ? m_block_starts[i + 1] :
m_m;
/* create a dense map and reuse in fine rows */
/* map size i + 1 suffices: in a lower-triangular row the block column
   never exceeds the row's own block index i */
std::vector<char> map(i + 1);
create_dense_block_row(i, map.data());
#pragma omp parallel for
for(mat_int_t j = i_from; j < i_to; ++j)
{
filtered_sizes[j] = filter_row(j, L->row_length(j),
L->row_col(j), bigbuf.data() + L->raw_row_ptr()[j],
map.data());
}
}
filtered_sizes[m_m] = 0;
/* compute offsets: in-place exclusive prefix sum; 'hold' carries the
   count that each slot held before it was overwritten with its offset */
mat_int_t hold = filtered_sizes[0];
filtered_sizes[0] = 0;
for(mat_int_t i = 1; i < m_m + 1; ++i)
{
const mat_int_t res = filtered_sizes[i - 1] + hold;
hold = filtered_sizes[i];
filtered_sizes[i] = res;
}
/* create filtered L */
const mat_int_t filtered_nnz = filtered_sizes[m_m];
Triangular_ptr<T> filt_L = Triangular_ptr<T>(new Triangular<T>(m_m,
filtered_nnz));
std::copy(filtered_sizes.begin(), filtered_sizes.end(),
filt_L->raw_row_ptr());
std::fill(filt_L->raw_val_ptr(), filt_L->raw_val_ptr() + filtered_nnz,
1.0);
/* second pass: write the filtered column indices into filt_L; the
   offsets were already copied into filt_L above, so reusing
   filtered_sizes as scratch output here is harmless */
for(mat_int_t i = 0; i < m_num_blocks; ++i)
{
const mat_int_t i_from = m_block_starts[i];
const mat_int_t i_to = (i < m_num_blocks - 1) ? m_block_starts[i + 1] :
m_m;
/* create a dense map and reuse in fine rows */
std::vector<char> map(i + 1);
create_dense_block_row(i, map.data());
#pragma omp parallel for
for(mat_int_t j = i_from; j < i_to; ++j)
{
filtered_sizes[j] = filter_row(j, L->row_length(j),
L->row_col(j), filt_L->raw_col_ptr() + filt_L->raw_row_ptr()[j],
map.data());
}
}
return filt_L;
}
/* ************************************************************************** */
/**
 * Initialize pivoting state for A by delegating to a fresh elimination
 * tree.
 *
 * @param A input matrix (CSR)
 */
template<typename T>
void
BlockRestrictedExactPattern<T>::
init_pivot_pattern(
const csr_matrix_t<T> * A)
{
this->m_A = A;
m_etree = EliminationTree_ptr<T>(new EliminationTree<T>(this->m_A->m));
m_etree->init_pivot_pattern(A);
}
/* ************************************************************************** */
/**
 * Apply a 1x1 pivot at cur_row; forwards to the elimination tree.
 *
 * @param cur_row row receiving the pivot
 * @param piv_a   row/column swapped into cur_row
 */
template<typename T>
void
BlockRestrictedExactPattern<T>::
pivot_1x1(
const mat_int_t cur_row,
const mat_int_t piv_a)
{
m_etree->pivot_1x1(cur_row, piv_a);
}
/* ************************************************************************** */
/**
 * Apply a 2x2 pivot at (cur_row, cur_row + 1); forwards to the
 * elimination tree.
 *
 * @param cur_row first row of the 2x2 pivot
 * @param piv_a   row swapped into cur_row
 * @param piv_b   row swapped into cur_row + 1
 */
template<typename T>
void
BlockRestrictedExactPattern<T>::
pivot_2x2(
const mat_int_t cur_row,
const mat_int_t piv_a,
const mat_int_t piv_b)
{
m_etree->pivot_2x2(cur_row, piv_a, piv_b);
}
/* ************************************************************************** */
/**
 * Generate the exact (elimination-tree) pattern of row 'row', keeping
 * only entries whose block appears in the coarse pattern.
 *
 * @param row    row to generate
 * @param buf_ix output buffer receiving the filtered column indices
 * @return number of indices written to buf_ix
 */
template<typename T>
mat_int_t
BlockRestrictedExactPattern<T>::
row_pattern(
const mat_int_t row,
mat_int_t * buf_ix)
{
/* a lower-triangular row has at most row + 1 entries */
std::vector<mat_int_t> buf(row + 1);
const mat_int_t orig_len = m_etree->row_pattern(row, buf.data());
/* no dense map passed: filter_row builds its own block-row map */
return filter_row(row, orig_len, buf.data(), buf_ix);
}
/* ************************************************************************** */
/**
 * Expand one coarse-matrix row into a dense 0/1 flag array.
 *
 * @param block_row coarse row to expand
 * @param dense_row caller-provided buffer of at least block_row + 1
 *                  entries; dense_row[b] is set to 1 iff block column b
 *                  occurs in the coarse row, 0 otherwise
 */
template<typename T>
void
BlockRestrictedExactPattern<T>::
create_dense_block_row(
const mat_int_t block_row,
char * dense_row)
{
    /* reset flags for block columns 0..block_row */
    std::fill(dense_row, dense_row + block_row + 1, 0);

    /* set a flag for every block column stored in the coarse row */
    const mat_int_t nz_begin = m_coarse->csr_row[block_row];
    const mat_int_t nz_end = m_coarse->csr_row[block_row + 1];
    for(mat_int_t k = nz_begin; k < nz_end; ++k)
        dense_row[m_coarse->csr_col[k]] = 1;
}
/* ************************************************************************** */
/**
 * Copy the entries of a row pattern whose block column is flagged in the
 * block row's dense map, discarding all others.
 *
 * @param row        fine row the pattern belongs to
 * @param row_len    number of input indices
 * @param in_row_ix  input column indices
 * @param out_row_ix output buffer (at least row_len entries)
 * @param dense_map  optional dense 0/1 block-row map; when nullptr, it is
 *                   built on the fly via create_dense_block_row()
 * @return number of surviving indices written to out_row_ix
 */
template<typename T>
mat_int_t
BlockRestrictedExactPattern<T>::
filter_row(
const mat_int_t row,
const mat_int_t row_len,
const mat_int_t * in_row_ix,
mat_int_t * out_row_ix,
const char * dense_map)
{
    const mat_int_t block_row = m_rowcol_in_block[row];

    /* build the block row map locally if the caller supplied none */
    std::vector<char> local_map;
    if(dense_map == nullptr)
    {
        local_map.resize(block_row + 1);
        create_dense_block_row(block_row, local_map.data());
        dense_map = local_map.data();
    }

    /* keep only entries whose containing block is flagged */
    mat_int_t * const out_end = std::copy_if(
        in_row_ix, in_row_ix + row_len, out_row_ix,
        [&](const mat_int_t col)
        {
            return dense_map[m_rowcol_in_block[col]] != 0;
        });
    return (mat_int_t) (out_end - out_row_ix);
}
NS_STAGING_END
NS_CULIP_END |
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Constitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Typedef declarations.
*/
/*
  Per-read option state gathered from an ImageInfo (see
  InitializeConstituteInfo()): caption/comment/label option strings,
  EXIF/TIFF property-sync switches, and the parsed "delay" geometry.
*/
typedef struct _ConstituteInfo
{
const char
*caption,   /* value of the "caption" image option, may be NULL */
*comment,   /* value of the "comment" image option, may be NULL */
*dispose,   /* NOTE(review): not set in this file's initializer; presumably
               assigned elsewhere -- confirm before relying on it */
*label;     /* value of the "label" image option, may be NULL */
MagickBooleanType
sync_from_exif,   /* sync image state from exif:* properties (default on) */
sync_from_tiff;   /* sync image state from tiff:* properties (default on) */
MagickStatusType
delay_flags;      /* ParseGeometry() flags for the "delay" option */
size_t
delay;            /* frame delay parsed from the "delay" option (rho) */
ssize_t
ticks_per_second; /* from the "delay" option's sigma component */
} ConstituteInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Build an Image from a raw pixel buffer: allocate the image, derive depth
  from the storage type, derive alpha trait / colorspace from the channel
  map, then import the pixels. Returns NULL on failure (exception holds
  the reason).
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
const char *map,const StorageType storage,const void *pixels,
ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
ssize_t
i;
size_t
length;
/*
Allocate image structure.
*/
assert(map != (const char *) NULL);
assert(pixels != (void *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
image=AcquireImage((ImageInfo *) NULL,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
/* derive the image depth (bits) from the storage type's byte size */
switch (storage)
{
case CharPixel: image->depth=8*sizeof(unsigned char); break;
case DoublePixel: image->depth=8*sizeof(double); break;
case FloatPixel: image->depth=8*sizeof(float); break;
case LongPixel: image->depth=8*sizeof(unsigned long); break;
case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
case ShortPixel: image->depth=8*sizeof(unsigned short); break;
default: break;
}
/* scan the channel map: alpha-like channels set the alpha trait,
   CMYK channels switch the colorspace, I selects grayscale */
length=strlen(map);
for (i=0; i < (ssize_t) length; i++)
{
switch (map[i])
{
case 'a':
case 'A':
case 'O':
case 'o':
{
image->alpha_trait=BlendPixelTrait;
break;
}
case 'C':
case 'c':
case 'm':
case 'M':
case 'Y':
case 'y':
case 'K':
case 'k':
{
image->colorspace=CMYKColorspace;
break;
}
case 'I':
case 'i':
{
image->colorspace=GRAYColorspace;
break;
}
default:
{
/* any other single-character map is treated as grayscale */
if (length == 1)
image->colorspace=GRAYColorspace;
break;
}
}
}
status=SetImageExtent(image,columns,rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  Stream handler used by PingImage(): discards the pixel data and reports
  the full row (columns) as consumed so decoding proceeds without storing
  pixels.
*/
static size_t PingStream(const Image *magick_unused(image),
const void *magick_unused(pixels),const size_t columns)
{
magick_unreferenced(image);
magick_unreferenced(pixels);
return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Read image properties without pixels: clones image_info with the ping
  flag set and decodes through PingStream, which drops pixel data. On
  success the timer is reset and, in verbose mode, the image is
  identified to stdout.
*/
MagickExport Image *PingImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*image;
ImageInfo
*ping_info;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
ping_info=CloneImageInfo(image_info);
ping_info->ping=MagickTrue;
image=ReadStream(ping_info,&PingStream,exception);
if (image != (Image *) NULL)
{
ResetTimer(&image->timer);
if (ping_info->verbose != MagickFalse)
(void) IdentifyImage(image,stdout,MagickFalse,exception);
}
ping_info=DestroyImageInfo(ping_info);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Ping one or more images. If the filename expands differently under
  InterpretImageFilename() (i.e. it contains a %d-style scene template),
  each scene in the requested range is pinged individually and the
  results appended to a list; otherwise a single PingImage() is done.
  Scenes that fail to ping are skipped, not treated as fatal.
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
ExceptionInfo *exception)
{
char
ping_filename[MagickPathExtent];
Image
*image,
*images;
ImageInfo
*read_info;
/*
Ping image list from a file.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
(void) SetImageOption(image_info,"filename",filename);
(void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
(void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
(int) image_info->scene,ping_filename,exception);
/* a differing expansion means the filename is a scene template */
if (LocaleCompare(ping_filename,image_info->filename) != 0)
{
ExceptionInfo
*sans;
ssize_t
extent,
scene;
/*
Images of the form image-%d.png[1-5].
*/
read_info=CloneImageInfo(image_info);
/* probe the scene range, discarding any exceptions it raises */
sans=AcquireExceptionInfo();
(void) SetImageInfo(read_info,0,sans);
sans=DestroyExceptionInfo(sans);
if (read_info->number_scenes == 0)
{
read_info=DestroyImageInfo(read_info);
return(PingImage(image_info,exception));
}
(void) CopyMagickString(ping_filename,read_info->filename,
MagickPathExtent);
images=NewImageList();
extent=(ssize_t) (read_info->scene+read_info->number_scenes);
for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
{
(void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
(int) scene,read_info->filename,exception);
image=PingImage(read_info,exception);
if (image == (Image *) NULL)
continue;
AppendImageToList(&images,image);
}
read_info=DestroyImageInfo(read_info);
return(images);
}
return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Check whether policy permits the given rights for the named coder.
  On denial, sets errno to EPERM, raises a PolicyError exception and
  returns MagickFalse; otherwise returns MagickTrue.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
/*
  Populate a ConstituteInfo from image_info options: zero the structure,
  default both property-sync switches to on (disabled by setting the
  exif:sync-image / tiff:sync-image options to a false value), capture
  the caption/comment/label option strings, and parse the "delay" option
  geometry (rho = delay, sigma = ticks-per-second).
*/
static void InitializeConstituteInfo(const ImageInfo *image_info,
ConstituteInfo *constitute_info)
{
const char
*option;
memset(constitute_info,0,sizeof(*constitute_info));
constitute_info->sync_from_exif=MagickTrue;
constitute_info->sync_from_tiff=MagickTrue;
option=GetImageOption(image_info,"exif:sync-image");
if (IsStringFalse(option) != MagickFalse)
constitute_info->sync_from_exif=MagickFalse;
option=GetImageOption(image_info,"tiff:sync-image");
if (IsStringFalse(option) != MagickFalse)
constitute_info->sync_from_tiff=MagickFalse;
constitute_info->caption=GetImageOption(image_info,"caption");
constitute_info->comment=GetImageOption(image_info,"comment");
constitute_info->label=GetImageOption(image_info,"label");
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
constitute_info->delay_flags=ParseGeometry(option,&geometry_info);
if (constitute_info->delay_flags != NoValue)
{
/* round to nearest: rho is the delay, sigma the tick rate */
constitute_info->delay=floor(geometry_info.rho+0.5);
if ((constitute_info->delay_flags & SigmaValue) != 0)
constitute_info->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
}
}
/*
  Sync image->orientation from the exif:Orientation property when EXIF
  sync is enabled, falling back to tiff:Orientation when no EXIF value
  was found and TIFF sync is enabled. The consumed property is deleted
  so it is not re-applied later.
*/
static void SyncOrientationFromProperties(Image *image,
ConstituteInfo *constitute_info,ExceptionInfo *exception)
{
const char
*orientation;
orientation=(const char *) NULL;
if (constitute_info->sync_from_exif != MagickFalse)
{
orientation=GetImageProperty(image,"exif:Orientation",exception);
if (orientation != (const char *) NULL)
{
image->orientation=(OrientationType) StringToLong(orientation);
(void) DeleteImageProperty(image,"exif:Orientation");
}
}
/* TIFF fallback only runs when EXIF yielded nothing */
if ((orientation == (const char *) NULL) &&
(constitute_info->sync_from_tiff != MagickFalse))
{
orientation=GetImageProperty(image,"tiff:Orientation",exception);
if (orientation != (const char *) NULL)
{
image->orientation=(OrientationType) StringToLong(orientation);
(void) DeleteImageProperty(image,"tiff:Orientation");
}
}
}
/*
  Sync image->resolution (x, y) and image->units from EXIF resolution
  properties when EXIF sync is enabled, falling back to the TIFF
  properties when EXIF provides neither value. Both X and Y must be
  present for a source to be used. The consumed source's properties are
  deleted afterwards.
*/
static void SyncResolutionFromProperties(Image *image,
ConstituteInfo *constitute_info, ExceptionInfo *exception)
{
const char
*resolution_x,
*resolution_y,
*resolution_units;
MagickBooleanType
used_tiff;
resolution_x=(const char *) NULL;
resolution_y=(const char *) NULL;
resolution_units=(const char *) NULL;
used_tiff=MagickFalse;
if (constitute_info->sync_from_exif != MagickFalse)
{
resolution_x=GetImageProperty(image,"exif:XResolution",exception);
resolution_y=GetImageProperty(image,"exif:YResolution",exception);
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
resolution_units=GetImageProperty(image,"exif:ResolutionUnit",
exception);
}
if ((resolution_x == (const char *) NULL) &&
(resolution_y == (const char *) NULL) &&
(constitute_info->sync_from_tiff != MagickFalse))
{
resolution_x=GetImageProperty(image,"tiff:XResolution",exception);
resolution_y=GetImageProperty(image,"tiff:YResolution",exception);
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
{
used_tiff=MagickTrue;
resolution_units=GetImageProperty(image,"tiff:ResolutionUnit",
exception);
}
}
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
{
GeometryInfo
geometry_info;
ssize_t
option_type;
/* values parse as rho/sigma rationals (e.g. "300/1"); sigma defaults
   to 1 so a plain number is taken as-is */
geometry_info.rho=image->resolution.x;
geometry_info.sigma=1.0;
(void) ParseGeometry(resolution_x,&geometry_info);
if (geometry_info.sigma != 0)
image->resolution.x=geometry_info.rho/geometry_info.sigma;
/* NOTE(review): a comma in the value is recombined as
   rho + sigma/1000 -- presumably a decimal-comma form; confirm */
if (strchr(resolution_x,',') != (char *) NULL)
image->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
geometry_info.rho=image->resolution.y;
geometry_info.sigma=1.0;
(void) ParseGeometry(resolution_y,&geometry_info);
if (geometry_info.sigma != 0)
image->resolution.y=geometry_info.rho/geometry_info.sigma;
if (strchr(resolution_y,',') != (char *) NULL)
image->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
if (resolution_units != (char *) NULL)
{
option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
resolution_units);
if (option_type >= 0)
image->units=(ResolutionType) option_type;
}
/* delete only the properties of the source actually used */
if (used_tiff == MagickFalse)
{
(void) DeleteImageProperty(image,"exif:XResolution");
(void) DeleteImageProperty(image,"exif:YResolution");
(void) DeleteImageProperty(image,"exif:ResolutionUnit");
}
else
{
(void) DeleteImageProperty(image,"tiff:XResolution");
(void) DeleteImageProperty(image,"tiff:YResolution");
(void) DeleteImageProperty(image,"tiff:ResolutionUnit");
}
}
}
/*
  ReadImage() reads an image or image sequence from the file, handle, or blob
  named in image_info->filename and returns it as an image list, or NULL on
  failure (exception describes the reason).  The caller's image_info is never
  modified; all work happens on a private clone.
*/
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  ConstituteInfo
    constitute_info;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* Clone so the caller's image_info stays untouched. */
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  /* Remember the resolved filename and format before they get rewritten. */
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  /* Only policy errors from the lookup are propagated to the caller. */
  if (sans_exception->severity == PolicyError)
    InheritException(exception,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Raw coders default to the host's native byte order. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        This coder needs random access; open the blob and, if it cannot
        seek, spool it to a temporary file first.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* Mark for cleanup of the spooled file after decoding. */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* No native decoder: unless a delegate exists, re-probe the format. */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickDecoderThreadSupport(magick_info) == MagickFalse))
        LockSemaphoreInfo(magick_info->semaphore);  /* serialize non-thread-safe coders */
      /* Security policy may forbid reading this format. */
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickDecoderThreadSupport(magick_info) == MagickFalse))
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      /* The delegate produced a converted file; decode that natively. */
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* Remove the spooled temporary file; restore the user's filename. */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /* Honor a scene subset such as image.gif[1-3]. */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  InitializeConstituteInfo(read_info,&constitute_info);
  /* Post-process every frame: sync metadata-derived settings, apply
     -extract/-caption/-delay/-dispose options, record timestamps. */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property;

    const StringInfo
      *profile;

    /* Cache the SOURCE_DATE_EPOCH lookup across calls (reproducible builds). */
    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if ((*magick_path == '\0') && (*next->magick == '\0'))
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Instantiate lazily-computed profile-derived properties. */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    SyncOrientationFromProperties(next,&constitute_info,exception);
    SyncResolutionFromProperties(next,&constitute_info,exception);
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    if (constitute_info.caption != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.caption,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.comment != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.comment,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.label != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.label,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        MagickStatusType
          flags;

        /* -extract: crop when an offset is present, otherwise resize. */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /*
      NOTE(review): the icc/icm result below is unconditionally overwritten
      by the iptc lookup and `profile' is otherwise unused in this loop --
      looks like dead code; confirm intent before removing.
    */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    if (epoch_initalized == MagickFalse)
      {
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    /* Skip timestamp properties when SOURCE_DATE_EPOCH pins build output. */
    if (source_date_epoch == (const char *) NULL)
      {
        char
          timestamp[MagickTimeExtent];

        (void) FormatMagickTime(next->timestamp,sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:timestamp",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    if (constitute_info.delay_flags != NoValue)
      {
        /* '>' clamps only larger delays; '<' raises only smaller ones. */
        if ((constitute_info.delay_flags & GreaterValue) != 0)
          {
            if (next->delay > constitute_info.delay)
              next->delay=constitute_info.delay;
          }
        else
          if ((constitute_info.delay_flags & LessValue) != 0)
            {
              if (next->delay < constitute_info.delay)
                next->delay=constitute_info.delay;
            }
          else
            next->delay=constitute_info.delay;
        if ((constitute_info.delay_flags & SigmaValue) != 0)
          next->ticks_per_second=constitute_info.ticks_per_second;
      }
    if (constitute_info.dispose != (const char *) NULL)
      {
        ssize_t
          option_type;

        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          constitute_info.dispose);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImages method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadImages() reads one or more images and returns them as an image list;
  a printf-style filename (e.g. image-%d.png) with a scene range expands to
  one ReadImage() call per scene.
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /* Substitute the starting scene number into any printf-style filename;
     if the result differs, the filename is enumerable. */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);  /* parse the [1-5] subimage spec */
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* Read each scene's file; unreadable scenes are skipped. */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /* Single (possibly multi-frame) file: defer to ReadImage(). */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadInlineImage() reads a Base64-encoded inline image or image sequence
  (e.g. data:image/gif;base64,...) and returns it as an image list, or NULL
  on failure with the reason recorded in exception.
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  const char
    *cursor;

  Image
    *image;

  ImageInfo
    *read_info;

  size_t
    length;

  unsigned char
    *blob;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  cursor=content;
  while ((*cursor != ',') && (*cursor != '\0'))
    cursor++;
  if (*cursor == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  /* Decode everything after the comma. */
  blob=Base64Decode(cursor+1,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /*
    Derive the format from the media subtype (text after the first '/').
  */
  cursor=content;
  while ((*cursor != '/') && (*cursor != '\0'))
    cursor++;
  if (*cursor != '\0')
    {
      char
        *dest;

      ssize_t
        count;

      /*
        Extract media type.
      */
      cursor++;
      if (LocaleNCompare(cursor,"x-",2) == 0)
        cursor+=2;  /* drop an experimental "x-" prefix */
      (void) strcpy(read_info->filename,"data.");
      dest=read_info->filename+5;
      count=0;
      while ((*cursor != ';') && (*cursor != '\0') &&
             (count < (MagickPathExtent-6)))
      {
        *dest++=(*cursor++);
        count++;
      }
      *dest++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If the output is a file on disk, its name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WriteImage() encodes `image' with the coder implied by write_info and the
  filename, falling back to a delegate program when no native encoder exists.
  Returns MagickTrue on success; exception describes any failure.
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  /* Clone so the caller's image_info is never modified. */
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* Save the original filename so it can be restored on some paths. */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  /* Re-run the lookup against `exception' so a policy error is reported. */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Raw coders default to the host's native byte order. */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      /*
        A bi-modal delegate converts straight from the original source file;
        it is only valid for a single, unmodified image.
      */
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /*
        Encoder needs random access; if the destination cannot seek, write
        to a unique temporary file and copy it to the destination later.
      */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickEncoderThreadSupport(magick_info) == MagickFalse))
        LockSemaphoreInfo(magick_info->semaphore);  /* serialize non-thread-safe coders */
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (GetMagickEncoderThreadSupport(magick_info) == MagickFalse))
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate: retry the lookup by the image's own
            format, then by the filename extension, before failing.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WriteImages() writes an image sequence, producing one adjoined file or one
  file per frame depending on the coder and the adjoin setting.  Returns
  MagickTrue only if every write succeeds.
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  if (images == (Image *) NULL)
    return(MagickFalse);
  /* An explicit filename overrides each frame's stored filename. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  /* If scene numbers are not strictly increasing, renumber the whole list
     sequentially starting from the first frame's scene. */
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* Suppress the per-frame monitor so progress reflects the whole list. */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* One adjoined file holds the entire sequence; stop after one write. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate and zero a new CacheInfo, then seed its defaults.
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no disk cache file descriptor open yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the nexus array for the worst case: the caller's request, the OpenMP
    thread ceiling, and the thread resource limit — whichever is largest, and
    never zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Honor MAGICK_SYNCHRONIZE from the environment; a "cache:synchronize"
    policy value, when present, is applied afterwards and so takes precedence.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  /*
    Clamp the width/height resource limits so region extents fit in ssize_t.
  */
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;  /* caller owns the initial reference */
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    i;

  /*
    Allocate 2*number_threads nexus slots: the first number_threads entries
    are the per-thread primary nexuses; the second half supplies each primary
    nexus with its virtual_nexus.  A single contiguous slab (stored in
    *nexus_info) backs every entry, and the pointer table indexes into it.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    /* pair each primary nexus with a virtual nexus from the second half */
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the base address and byte length of the image's pixel store when
    it resides in heap memory or a memory-mapped file; otherwise set *length
    to 0 and return NULL.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=0;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    {
      *length=(size_t) info->length;
      return(info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Instantiate the cache component: lazily create the module-wide cache
    semaphore.  Always reports success.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    If genesis never ran, materialize the semaphore first so the relinquish
    below always operates on a valid handle.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* The semaphore is this component's only global state; destroy it. */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels currently stored in the cache for this
    region (fetched through the virtual nexus); q walks the pending pixels
    held in the nexus itself.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      /* normalized write-mask value for this pixel */
      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /*
            Where the mask is non-zero, composite the cached pixel (p) over
            the pending update (q) for every updatable channel, weighting p's
            alpha by the mask.
          */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Create a fresh pixel cache that inherits the thread count and virtual
    pixel method of the given cache; pixel data itself is not copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the pixel cache method table of `cache' into `clone'.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* rewind both files so the copy starts at offset 0 */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: Linux sendfile(2) copies kernel-to-kernel with no user
        buffer.  It transfers at most 0x7ffff000 bytes per call, hence the
        length guard; on a short transfer, rewind and fall back to the
        read/write loop below.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Portable path: copy the cache file in quantum-sized chunks, aborting on
    a short write.
  */
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* succeed only if exactly the expected number of bytes was cloned */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches carry no pixel data to clone */
  /*
    Fast paths when source and destination have identical morphology
    (class, colorspace, alpha, channels, geometry, channel map, metacontent).
  */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* both in addressable memory: a straight memcpy suffices */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through per-thread
    nexuses, remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: identical channel maps allow a per-row memcpy */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    if (y >= (ssize_t) clone_info->rows)
      continue;  /* destination is shorter than the source */
    /* stage one source row ... */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* ... and one destination row */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: for each destination channel, pull
          the corresponding source channel via the source's channel map;
          channels absent from the source are left zeroed by the memset.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache reference, if one is attached.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Deallocate the image's pixel cache, delegating to an installed
    destroy-pixel handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the disk-backed pixel cache file descriptor and release its file
    resource.  Returns MagickFalse when no file was open or when close(2)
    reports an error.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing this cache, dispatching on its type, and
    reset the cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* OpenCL owns the buffer: hand it back to the CL cache manager */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: a map cache is file-backed, so also run the DiskCache
       teardown below to close the descriptor and remove the backing file */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the cache semaphore; only the final reference
    tears the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Last reference: release the pixel store, then every auxiliary resource,
    invalidate the signature, and free the structure itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the staging buffer owned by a cache nexus and reset its bookkeeping.
    Memory-mapped buffers are unmapped; heap buffers go back to the aligned
    allocator.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    n;

  /*
    Destroy a nexus table created by AcquirePixelCacheNexus(): release each
    per-entry staging buffer, then the contiguous NexusInfo slab, then the
    pointer table.  The table holds 2*number_threads entries (the second
    half backs the virtual nexuses).
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent staged by the calling thread's most recent
    authentic pixel request, delegating to an installed handler when one is
    registered.  NULL when no metacontent is available.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    A shared or uninitialized cache must be synchronized (and possibly
    copied) before handing its pixels to OpenCL; re-read image->cache since
    SyncImagePixelCache() may replace it.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* only unmapped heap-memory caches can back an OpenCL buffer */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* an existing CL cache bound to a different context must be copied */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* retain the mem object while still holding the semaphore */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* map/validate the requested region into the nexus */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* when the nexus aliases the cache's own storage, no copy is required */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  /* fetch metacontent too, when the cache carries any */
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  /*
    Return the authentic pixels associated with the last call to
    QueueAuthenticPixels() or GetAuthenticPixels().
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Delegate to a registered handler when one is installed; otherwise fall
    back to the calling thread's nexus pixels.
  */
  if (info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(info->methods.get_authentic_pixels_from_handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Obtain a pixel region for read/write access.  A registered handler (if
    any) services the request; otherwise the region is read through the
    calling thread's cache nexus.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Default handler: fetch authentic pixels through the calling thread's
    cache nexus.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  /*
    Return the extent (pixel count) of the calling thread's cache nexus, as
    established by the last QueueAuthenticPixels()/GetAuthenticPixels() call.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  /*
    Report whether the pixel cache still matches the image's morphology:
    storage class, colorspace, geometry, and channel layout must all agree.
  */
  const CacheInfo
    *magick_restrict info;

  const PixelChannelMap
    *magick_restrict cache_map,
    *magick_restrict image_map;

  info=(CacheInfo *) image->cache;
  image_map=image->channel_map;
  cache_map=info->channel_map;
  if (image->storage_class != info->storage_class)
    return(MagickFalse);
  if (image->colorspace != info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != info->alpha_trait)
    return(MagickFalse);
  if (image->channels != info->channels)
    return(MagickFalse);
  if (image->columns != info->columns)
    return(MagickFalse);
  if (image->rows != info->rows)
    return(MagickFalse);
  if (image->number_channels != info->number_channels)
    return(MagickFalse);
  if (memcmp(image_map,cache_map,image->number_channels*
      sizeof(*image_map)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != info->metacontent_extent)
    return(MagickFalse);
  if (info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Shared across all calls: the time limit and CPU throttle are resolved
    lazily on first use; cycles counts calls so the throttle delay is only
    applied once every 32 invocations.
    NOTE(review): these statics are written without synchronization —
    presumably benign races; confirm against the project's threading model.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        Time resource limit exceeded: close any disk cache file and abort
        with a fatal exception.
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared (reference_count > 1) or read-only,
    replace image->cache with a private writable clone.  The condition is
    re-checked under the cache semaphore (double-checked locking pattern).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A stack copy of the image carries the new
            cache through OpenPixelCache(); it gets its own semaphore so the
            original image's lock is not re-entered.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* only copy the pixel data itself when the caller asked */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;  /* drop our reference to the old cache */
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.  The image type
        is reset since writing pixels may invalidate it.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  /*
    Report the backing-store type of the image's pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    n;

  if (source != (const Quantum *) NULL)
    {
      /*
        Scatter each source component into its mapped channel slot.
      */
      for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
        destination[GetPixelChannelChannel(image,n)]=source[n];
      return(MagickTrue);
    }
  /*
    No source pixels: substitute the image background color and report
    failure to the caller.
  */
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(image->background_color.alpha);
  return(MagickFalse);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Return the single pixel at (x,y); the background color is substituted on
    failure.  The pixel buffer is zeroed first so untouched channels are
    well defined.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,
      exception));
  return(CopyPixel(image,GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception),
    pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(const Image image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler for single-pixel authentic reads: fetch a 1x1 region
    through the calling thread's cache nexus.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  Quantum
    *magick_restrict region;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  region=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,region,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Return a single virtual pixel at (x,y), using the image's configured
    virtual pixel method for out-of-bounds coordinates.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *region;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  region=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  return(CopyPixel(image,region,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler for single-pixel virtual reads: fetch a 1x1 region with
    the requested virtual pixel method through the thread's cache nexus.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *region;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  region=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,region,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  /*
    Return a single virtual pixel at (x,y) as a PixelInfo; MagickFalse is
    returned when the pixel cannot be read.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *magick_restrict region;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  region=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (region == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,region,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  /*
    Report the colorspace recorded in the pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Install the default pixel cache handlers into a zeroed CacheMethods
    structure.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /* authentic (read/write) access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  /* queueing, synchronization, teardown */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Return the pixel count of the nexus region; an empty region means the
    extent of the entire cache.
  */
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    count;

  assert(cache != NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  count=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (count != 0)
    return(count);
  return((MagickSizeType) info->columns*info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  /*
    Return the raw pixel buffer and its length; only memory- and map-backed
    caches expose their pixels directly (NULL otherwise).
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  /*
    Report the storage class (DirectClass or PseudoClass) recorded in the
    pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  /*
    Report the virtual pixel method configured for the image's pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the meta-content staged in the cache nexus reserved for the
    calling thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the meta-content held by the given cache nexus; a cache whose
    storage class is still undefined has nothing to offer.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  /*
    Prefer the registered cache-method handler; fall back to the nexus owned
    by the calling thread when the handler yields no meta-content.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent == (void *) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      metacontent=GetVirtualMetacontentFromNexus(cache_info,
        cache_info->nexus_info[id]);
    }
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither matrix used to scatter out-of-bounds coordinates back
  into the image (only the first row is indexed, via `coordinate & 0x07').
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/*
  Clamp a dithered coordinate into the valid range [0, extent-1].
*/
static inline ssize_t ClampDitherOffset(const ssize_t offset,
  const size_t extent)
{
  if (offset < 0L)
    return(0L);
  if (offset >= (ssize_t) extent)
    return((ssize_t) extent-1L);
  return(offset);
}

/*
  Map an x coordinate through the dither matrix onto a valid column index.
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  return(ClampDitherOffset(x+DitherMatrix[x & 0x07]-32L,columns));
}

/*
  Map a y coordinate through the dither matrix onto a valid row index.
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  return(ClampDitherOffset(y+DitherMatrix[y & 0x07]-32L,rows));
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  const ssize_t
    last = (ssize_t) columns-1;

  /*
    Replicate-edge policy: coordinates left of the image clamp to column 0,
    coordinates right of the image clamp to the last column.
  */
  return(x < 0L ? 0L : (x > last ? last : x));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  const ssize_t
    last = (ssize_t) rows-1;

  /*
    Replicate-edge policy: coordinates above the image clamp to row 0,
    coordinates below the image clamp to the last row.
  */
  return(y < 0L ? 0L : (y > last ? last : y));
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    uniform;

  /*
    Pick a pseudo-random column index; GetPseudoRandomValue() presumably
    yields a value in [0,1) -- scaling by columns keeps the result inside
    the image (TODO confirm against random.c).
  */
  uniform=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*uniform));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    uniform;

  /*
    Pick a pseudo-random row index; GetPseudoRandomValue() presumably yields
    a value in [0,1) -- scaling by rows keeps the result inside the image
    (TODO confirm against random.c).
  */
  uniform=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*uniform));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division of offset by extent: the quotient rounds toward
    negative infinity and the remainder always lands in [0, extent), which
    is what the tiling virtual-pixel methods require.

    Bug fix: the remainder computation is now guarded by `extent != 0' as
    well.  Previously only the quotient was guarded while `offset % extent'
    executed unconditionally, which is a division by zero (undefined
    behavior, C11 6.5.5) whenever extent == 0.
  */
  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      /*
        C's `/' truncates toward zero; adjust so the pair behaves as floored
        division when offset and extent have opposite signs.
      */
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage a nexus buffer for the requested region; a write or composite mask
    forces a private buffer rather than an in-place view of the cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the whole request lies inside the cache extents, so the
    pixels (and meta-content, when present) can be read directly.
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For the constant-color virtual pixel methods, pre-compute the single
    pixel value (and a zeroed meta-content buffer) that will be used for
    every out-of-bounds coordinate.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            BackgroundVirtualPixelMethod and the tiling methods fall through
            to the image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Assemble the region row by row: in-bounds spans are copied in runs,
    while out-of-bounds coordinates are resolved one pixel at a time via a
    recursive 1x1 request according to the virtual pixel method.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /*
        Length of the longest in-bounds run starting at (x_offset,y_offset).
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              /* nexus_info->virtual_nexus is the same nexus as
                 virtual_nexus (assigned above). */
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the random source on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* Wrap both coordinates to tile the image endlessly. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Reflect every other tile so edges meet seamlessly. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              /* Tile horizontally, replicate edge rows vertically. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              /* Tile vertically, replicate edge columns horizontally. */
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant color pre-computed above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              /* Alternate tiles of image and background (checkerboard). */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /* Background above/below the image; tile horizontally. */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /* Background left/right of the image; tile vertically. */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          /* length == 1 here, so this copies one pixel's channels. */
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      /*
        NOTE(review): only `length' bytes of meta-content are copied here,
        yet `s' advances by length*metacontent_extent -- confirm whether the
        copy size should be length*metacontent_extent.
      */
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    /* Inner loop broke early (pixel fetch failed): abort the row walk. */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  /* v short of rows means a fetch failed somewhere: report failure. */
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch virtual pixels for the region through the cache nexus reserved
    for the calling thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels staged by the most recent queue/get call.  When no
    handler is registered, read directly from this thread's cache nexus;
    otherwise delegate to the installed handler.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return an immutable view of the requested region.  A registered virtual
    pixel handler takes precedence; otherwise read through the cache nexus
    reserved for the calling thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler ==
      (GetVirtualPixelHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),
        x,y,columns,rows,cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_virtual_pixel_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated corresponding with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Hand back the pixels held by the cache nexus that belongs to the
    calling thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the pixel buffer held by the given cache nexus; a cache whose
    storage class is still undefined has no pixels to offer.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blend pixel p (weighted by alpha) against pixel q (weighted by beta) to
  honor a composite mask; a fully opaque mask (alpha == OpaqueAlpha) leaves
  p untouched.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;
  Quantum
    pixel;
  /* Fast path: mask is (numerically) fully opaque. */
  if (fabs((double) (alpha-OpaqueAlpha)) < MagickEpsilon)
    return(p);
  /*
    Normalize by the combined coverage of the two alpha values;
    PerceptibleReciprocal() guards against dividing by a near-zero
    coverage.  MagickOver_ presumably implements the standard "over"
    compositing operator -- verify against its definition.
  */
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do without a composite mask channel or an empty region. */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic (source) pixels for the same region via the
    virtual nexus; q walks the nexus pixels being masked in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Only channels flagged for update are blended. */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Open (or create) the disk-backed pixel cache file for cache_info in the
  requested mode, tracking the open file against the FileResource limit.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* No filename yet: create a fresh unique temporary cache file. */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /*
          Try to create the file exclusively first; if it already exists,
          fall back to opening it for writing.
        */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Same create-then-open fallback, but read/write. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Replace any previously open descriptor before recording the new one. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length' bytes from buffer to the cache file at `offset', retrying
  short writes and EINTR interruptions.  Returns the number of bytes
  actually written (callers compare against `length'), or -1 when the
  initial seek fails on platforms without pwrite().
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Each write is capped at MAGICK_SSIZE_MAX to fit a ssize_t count. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry when interrupted by a signal; abort on any other error. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  Grow the disk-backed pixel cache file to at least `length' bytes.
  Returns MagickTrue on success, MagickFalse on seek/write failure or when
  `length' cannot be represented as a signed file offset.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that overflow a signed offset (lseek limit). */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  /* File already large enough: nothing to write. */
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      /*
        Extend the file by writing a single byte at the requested end;
        optionally ask the filesystem to reserve the full extent so later
        writes cannot fail with ENOSPC mid-operation.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent reads/writes start at the file head. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() binds pixel storage to the image's cache, trying each
  backing store in order of preference: anonymous/heap memory, a distributed
  cache server, then a disk file (memory-mapped when possible).  Returns
  MagickTrue on success; on failure the cache type is left UndefinedCache and
  an exception is raised.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
        (cache_anonymous_memory < 0 means the policy has not been read yet.)
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Keep a snapshot of the previous cache so existing pixels can be cloned
    into the new store (and later relinquished); the snapshot does not own
    the file descriptor.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Recompute columns from the product; a mismatch indicates the
    columns*rows*packet_size multiplication overflowed MagickSizeType.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /*
        Ping mode: no pixel storage is needed at all.
      */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              /*
                Heap allocation (the default).
              */
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /*
                Policy requested an anonymous memory mapping.
              */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Allocation failed; restore the previous pixel store and fall
                through to the disk path.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.  Metacontent, when present, lives
                immediately after the pixel data in the same allocation.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /*
                    Carry the previous cache's pixels into the new store.
                  */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /*
        No disk resource and no reachable cache server: give up.
      */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /*
        Start from a fresh cache file; the old contents are cloned below.
      */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      /*
        The cache fits in the address space; try to memory-map the file.
      */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Mapping failed; stay with the plain DiskCache.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Plain disk cache (no mapping).
  */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent pixel
% cache rather than initializing a new one.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Flush any pending OpenCL pixel data back to host memory first. */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: point this image's cache at
        the on-disk file at *offset and memory-map it read-only.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance *offset to the next page boundary past this cache. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache: write this image's pixels into the
    persistent file at *offset via a temporary disk-backed clone.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  /* Mirror the source cache's geometry and layout in the clone. */
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* Advance *offset to the next page boundary past the written cache. */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* The anchor (x,y) must lie inside the cache extent. */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* y*columns overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Advance to the last pixel of the region; it too must be in bounds. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region through the calling thread's private cache
    nexus; the nexus is later flushed by SyncAuthenticPixelsCache().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region, delegating to an installed
    queue-authentic-pixels handler when one is registered; otherwise go
    through the calling thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler ==
      (QueueAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads up to `length` bytes from the cache file at
  byte position `offset` into `buffer`, retrying on EINTR and on short reads.
  Returns the number of bytes actually read; the caller treats any value
  other than `length` as a failure.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* Without pread(), position the shared file offset explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    /* Clamp each request to what a ssize_t return can represent. */
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry only when interrupted by a signal; otherwise give up. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  /* Nothing to do when the image carries no metacontent. */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* The nexus views cache storage directly: no copy needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;  /* bytes per nexus row */
  extent=length*nexus_info->region.height;  /* total bytes */
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy all rows in a single memcpy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region small enough for one read. */
          length=extent;
          rows=1UL;
        }
      /*
        Reuse extent as the total pixel count: metacontent is stored on disk
        after all pixel data, so each row's file position is
        offset_of_pixels + extent*channels*sizeof(Quantum) + row offset.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A row loop that broke early indicates a short or failed transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* The nexus views cache storage directly: no copy needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* Division round-trip detects overflow of y*columns. */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);  /* bytes per nexus row */
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;  /* total bytes */
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy all rows in a single memcpy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region small enough for one read. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A row loop that broke early indicates a short or failed transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  ReferencePixelCache() bumps the cache's reference count under its semaphore
  and returns the same opaque cache handle.

  Fix: the original asserted `cache != (Cache *) NULL`, comparing the Cache
  handle (itself a pointer type) against a pointer-to-Cache null constant;
  the correct and file-consistent form is `(Cache) NULL`.
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Re-derive the cache's channel count from the image's current pixel
    channel map.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Reset the file-scope cache_anonymous_memory setting to its default. */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the file-scope cache_epoch counter to its default. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  SetPixelCacheMethods() overrides the pixel cache's method table with the
  non-NULL handlers supplied in cache_methods; NULL entries leave the
  currently installed handler in place.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Each handler is copied only when the caller supplied a replacement. */
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): the next guard tests the CURRENTLY installed virtual-pixel
    handler rather than the incoming one -- asymmetric with the authentic
    case below.  This matches long-standing behavior here; confirm upstream
    intent before changing.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireCacheNexusPixels() allocates `length` bytes of staging storage for a
  cache nexus -- aligned heap memory by default, or an anonymous memory map
  when cache_anonymous_memory is positive.  On failure it raises a
  ResourceLimitError exception and returns MagickFalse.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* Reject lengths that cannot be represented as size_t on this platform. */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* Heap path: aligned allocation, zero-filled on success. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* Anonymous-map path: fd -1 requests a private anonymous mapping. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  /* Record the usable size only after the allocation succeeded. */
  nexus_info->length=length;
  return(MagickTrue);
}
/*
  Hint the CPU to prefetch the next cache line of the nexus pixels: read
  intent for ReadMode, write intent otherwise.  Regions smaller than one
  cache line are not worth prefetching.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *next_line;

  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  next_line=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(next_line,0,1);
  else
    MagickCachePrefetch(next_line,1,1);
}
/*
  ValidatePixelOffset() confirms that offset arithmetic x +/- a stays inside
  the ssize_t range: a non-negative x within `a` of MAGICK_SSIZE_MAX, or any
  x within `a` of MAGICK_SSIZE_MIN, would overflow.
*/
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  MagickBooleanType
    too_high,
    too_low;

  too_high=((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a))) ?
    MagickTrue : MagickFalse;
  too_low=(x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a)) ? MagickTrue :
    MagickFalse;
  if ((too_high != MagickFalse) || (too_low != MagickFalse))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  SetPixelCacheNexusPixels() defines the region of the cache for the
  specified cache nexus.  When the region lies wholly inside an in-memory
  (or mapped) cache and buffering was not requested, the nexus aliases the
  cache pixels directly; otherwise pixels are staged in a private buffer
  that is synced back later.  Returns the nexus pixels, or NULL on error.
*/
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /* Enforce geometry limits and guard x/y +/- width/height overflow. */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only for an in-bounds region that is
        contiguous in memory: either full-width rows, or a single row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  /* Reuse the existing staging buffer when it is already large enough. */
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* Metacontent is packed immediately after the pixel data. */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetCacheAlphaChannel() enables the image's alpha trait and sets every
  pixel's alpha to the given value.  Rows are processed in parallel when
  OpenMP is available.  Returns MagickTrue on success.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failed row marks status; remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
  pixel cache and returns the previous setting.  Some methods require the
  image itself to change: background/transparent virtual pixels need an
  alpha channel, and a non-gray background forces sRGB.
*/
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  /* Empty images carry no pixels, so no image mutation is needed. */
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Materialize alpha if the background has it but the image lacks it. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* A colored background cannot be represented in a gray colorspace. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  CopyOpenCLBuffer() synchronizes the host pixel buffer with its OpenCL
  counterpart.  Only in-memory caches with an attached OpenCL cache need
  copying; anything else returns immediately.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
/*
  SyncAuthenticOpenCLBuffer() makes sure all OpenCL operations on the
  image's pixel cache have completed and the host memory is current.
*/
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelCacheNexus() saves the authentic image pixels held in a
  nexus to the in-memory or disk cache, applying any write/composite masks
  first.  Returns MagickTrue if the pixel region is synced.  The image is
  marked tainted on success.
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply mask channels unless a mask update itself is in progress. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* Direct-mapped nexuses already wrote through; just taint the image. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() saves the authentic image pixels for the
  calling thread's cache nexus to the in-memory or disk cache.  Returns
  MagickTrue if the pixel region is synced.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixels() saves the image pixels to the in-memory or disk
  cache.  An installed sync handler takes precedence; otherwise the calling
  thread's cache nexus is synced directly.  Returns MagickTrue if the pixel
  region is flushed.
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagePixelCache() saves the image pixels to the in-memory or disk
  cache; success is reported when a cache could be acquired.
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() writes the nexus meta-content to the
  specified region of the pixel cache's backing store (memory, disk, or
  distributed cache).  Returns MagickTrue on success; MagickFalse when the
  cache carries no meta-content or a write fails.

  Fix: the distributed-cache write passed a mojibake token (the HTML entity
  for '&' + "region" had been decoded to a non-ASCII character); restored
  the intended `&region` argument.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* A direct-mapped nexus already wrote through; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  Full-width regions collapse to a
        single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.  Metacontent is stored after all
        pixel data, hence the extent*number_channels file offset.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, row by row unless the whole
        region fits one buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early loop exit means a short write occurred. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes the nexus pixels to the specified region of
  the pixel cache's backing store (memory, disk, or distributed cache).
  Returns MagickTrue on success.

  Fix: the distributed-cache write passed a mojibake token (the HTML entity
  for '&' + "region" had been decoded to a non-ASCII character); restored
  the intended `&region` argument.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /* A direct-mapped nexus already wrote through; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  Full-width regions collapse to a single
        contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, row by row unless the whole
        region fits one buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early loop exit means a short write occurred. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
normalize_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
*/
#include "normalize_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread)
{
memset(buffer, 0, hw * sizeof(float));
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
buffer[j] += (data * data);
}
}
//#pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < hw; j++)
{
buffer[j] = 1.f / sqrt(buffer[j]);
}
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
*(output + i * hw + j) = data * buffer[j] * scale[i];
}
}
}
/* No per-node state to allocate for the reference implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Nothing was allocated in init_node, so nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
normalize_param_t* param = (normalize_param_t*)(ir_node->op.param_mem);
float* input_org = (float*)input_tensor->data;
float* output_org = (float*)output_tensor->data;
float* sclae_org = (float*)scale_tensor->data;
int batch_number = input_tensor->dims[0];
int channel_num = input_tensor->dims[1];
int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
int img_size = channel_num * channel_size;
float* buffer = (float*)sys_malloc(channel_size * sizeof(float));
if (param->channel_shared == 0 && param->across_spatial == 0)
{
for (int i = 0; i < batch_number; i++)
{
norm_channel(input_org, output_org, buffer, sclae_org, channel_size, channel_num, exec_graph->num_thread);
input_org += img_size;
output_org += img_size;
}
}
sys_free(buffer);
return 0;
}
/* Rank this implementation for operator selection.
 * The reference kernel handles every configuration, so it always
 * advertises the best score. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    (void)node_ops;
    (void)exec_graph;
    (void)exec_node;
    return OPS_SCORE_BEST;
}
/* Callback table for the reference Normalize implementation.
 * prerun/reshape/postrun are not needed by this kernel and stay NULL. */
static struct node_ops normalize_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference Normalize kernel with the global operator
 * registry.  Forwards the registry's status code to the caller. */
int register_normalize_ref_op()
{
    int status = register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
    return status;
}
/* Remove the reference Normalize kernel from the global operator
 * registry.  Forwards the registry's status code to the caller. */
int unregister_normalize_ref_op()
{
    int status = unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
    return status;
}
|
GxB_SelectOp_wait.c | //------------------------------------------------------------------------------
// GxB_SelectOp_wait: wait for a user-defined GxB_SelectOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GxB_SelectOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
// A user-defined GxB_SelectOp has no pending work: this call only validates
// the op and performs an OpenMP flush.  The signature is version-dependent:
// GraphBLAS v5.x and earlier take a pointer to the op, v6+ take the op
// itself plus a wait mode.
GrB_Info GxB_SelectOp_wait // no work, just check if the GxB_SelectOp is valid
(
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GxB_SelectOp *op
#else
GxB_SelectOp op,
GrB_WaitMode waitmode
#endif
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GB_WHERE1 ("GxB_SelectOp_wait (&op)") ;
GB_RETURN_IF_NULL (op) ;
GB_RETURN_IF_NULL_OR_FAULTY (*op) ;
#else
GB_WHERE1 ("GxB_SelectOp_wait (op, waitmode)") ;
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
#endif
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// make any prior writes to the op visible to all threads
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
HybridAdoptorBase.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
//////////////////////////////////////////////////////////////////////////////////////
/** @file HybridAdoptorBase.h
*
* Hybrid adoptor base class
*/
#ifndef QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H
#define QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H
#include <Particle/DistanceTableData.h>
#include <QMCWaveFunctions/lcao/SoaSphericalTensor.h>
#include <spline2/MultiBspline.hpp>
namespace qmcplusplus
{
/** Per-center atomic orbital data in structure-of-arrays layout.
 *
 * Holds one multi-band 1D radial B-spline per (l,m) channel plus a real
 * spherical-harmonics evaluator, and computes the value, gradient and
 * Laplacian of all bands at an electron position relative to this center.
 * ST is the spline precision type (float or double).
 */
template<typename ST>
struct AtomicOrbitalSoA
{
static const int D=3;
using AtomicSplineType=typename bspline_traits<ST,1>::SplineType;
using AtomicBCType=typename bspline_traits<ST,1>::BCType;
using AtomicSingleSplineType=UBspline_1d_d;
using PointType=TinyVector<ST,D>;
using value_type=ST;
using vContainer_type=aligned_vector<ST>;
// near core cutoff
ST rmin;
// far from core cutoff, rmin_sqrt>=rmin
ST rmin_sqrt;
ST cutoff, cutoff_buffer, spline_radius, non_overlapping_radius;
int spline_npoints, BaseN;
int NumBands, Npad;
// center position in Cartesian coordinates
PointType pos;
// lmax: max angular momentum; lm_tot=(lmax+1)^2 compound (l,m) channels
const int lmax, lm_tot;
SoaSphericalTensor<ST> Ylm;
// l_vals[lm] = l for the channel at compound index lm = l*(l+1)+m
vContainer_type l_vals;
// scratch: r^{-l} per channel, filled inside evaluate_vgl
vContainer_type r_power_minus_l;
///expose the pointer to reuse the reader and only assigned with create_spline
///also used as identifier of shallow copy
AtomicSplineType* MultiSpline;
MultiBspline1D<ST>* SplineInst;
// per-channel spline outputs, each Npad entries per channel
vContainer_type localV, localG, localL;
/// Construct for max angular momentum Lmax; splines are created later via
/// resizeStorage()/create_spline().
AtomicOrbitalSoA(int Lmax):
Ylm(Lmax), MultiSpline(nullptr), SplineInst(nullptr), lmax(Lmax),
lm_tot((Lmax+1)*(Lmax+1))
{
r_power_minus_l.resize(lm_tot);
l_vals.resize(lm_tot);
for(int l=0; l<=lmax; l++)
for(int m=-l; m<=l; m++)
l_vals[l*(l+1)+m] = l;
// smallest radius for which r^{-Lmax} does not underflow ST
rmin = std::exp(std::log(std::numeric_limits<ST>::min())/std::max(Lmax,1));
rmin = std::max(rmin,std::numeric_limits<ST>::epsilon());
rmin_sqrt=std::max(rmin,std::sqrt(std::numeric_limits<ST>::epsilon()));
}
/// Delete the spline only in the owning copy; per the note above,
/// MultiSpline doubles as the shallow-copy marker.
~AtomicOrbitalSoA()
{
if(MultiSpline != nullptr) delete SplineInst;
}
/// Allocate per-band storage (padded to SIMD alignment) and build splines.
inline void resizeStorage(size_t Nb)
{
NumBands=Nb;
Npad=getAlignedSize<ST>(Nb);
localV.resize(Npad*lm_tot);
localG.resize(Npad*lm_tot);
localL.resize(Npad*lm_tot);
create_spline();
qmc_common.memory_allocated += SplineInst->sizeInByte();
}
/// Broadcast the spline coefficient table across MPI ranks.
void bcast_tables(Communicate* comm)
{
chunked_bcast(comm, MultiSpline);
}
/// Gather spline coefficients from all ranks; band ranges given by offset.
void gather_tables(Communicate* comm, std::vector<int> &offset)
{
gatherv(comm, MultiSpline, Npad, offset);
}
/// Record the center position, radial cutoffs and spline grid parameters.
template<typename PT, typename VT>
inline void set_info(const PT& R, const VT& cutoff_in,
const VT& cutoff_buffer_in, const VT& spline_radius_in,
const VT& non_overlapping_radius_in, const int& spline_npoints_in)
{
pos[0]=R[0];
pos[1]=R[1];
pos[2]=R[2];
cutoff=cutoff_in;
cutoff_buffer=cutoff_buffer_in;
spline_radius=spline_radius_in;
spline_npoints=spline_npoints_in;
non_overlapping_radius=non_overlapping_radius_in;
BaseN=spline_npoints+2; // grid points plus boundary padding
}
/// Create the multi-band radial spline: flat BC at r=0, natural BC at
/// spline_radius, lm_tot*Npad bands total.
inline void create_spline()
{
AtomicBCType bc;
bc.lCode = FLAT;
bc.rCode = NATURAL;
Ugrid grid;
grid.start = 0.0;
grid.end = spline_radius;
grid.num = spline_npoints;
SplineInst = new MultiBspline1D<ST>();
SplineInst->create(grid, bc, lm_tot*Npad);
MultiSpline=&(SplineInst->spline_m);
}
/// Zero all spline coefficients.
inline void flush_zero()
{
SplineInst->flush_zero();
}
/// Copy one single-band radial spline into band ispline of channel lm.
inline void set_spline(AtomicSingleSplineType* spline, int lm, int ispline)
{
SplineInst->copy_spline(spline, lm*Npad+ispline, 0, BaseN);
}
/// Read splines from HDF5; rejects data whose lmax, radius or point count
/// disagree with this object's settings.
bool read_splines(hdf_archive& h5f)
{
einspline_engine<AtomicSplineType> bigtable(MultiSpline);
int lmax_in, spline_npoints_in;
ST spline_radius_in;
bool success=true;
success = success && h5f.read(lmax_in, "l_max");
success = success && h5f.read(spline_radius_in, "spline_radius");
success = success && h5f.read(spline_npoints_in, "spline_npoints");
if(lmax_in!=lmax) return false;
if(spline_radius_in!=spline_radius) return false;
if(spline_npoints_in!=spline_npoints) return false;
return success && h5f.read(bigtable,"radial_spline");
}
/// Write spline metadata and coefficients to HDF5.
bool write_splines(hdf_archive& h5f)
{
bool success=true;
success = success && h5f.write(spline_radius, "spline_radius");
success = success && h5f.write(spline_npoints, "spline_npoints");
success = success && h5f.write(lmax, "l_max");
success = success && h5f.write(pos, "position");
einspline_engine<AtomicSplineType> bigtable(MultiSpline);
success = success && h5f.write(bigtable,"radial_spline");
return success;
}
//evaluate only V
/// For every band ib: myV[ib] = sum_lm Ylm(dr/r) * radial_lm(r).
template<typename VV>
inline void evaluate_v(const ST& r, const PointType& dr, VV& myV)
{
if (r>std::numeric_limits<ST>::epsilon())
Ylm.evaluateV(dr[0]/r, dr[1]/r, dr[2]/r);
else
Ylm.evaluateV(0,0,1); // direction undefined at the origin; use z-axis
const ST* restrict Ylm_v=Ylm[0];
constexpr ST czero(0);
ST* restrict val=myV.data();
ST* restrict local_val=localV.data();
std::fill(myV.begin(),myV.end(),czero);
SplineInst->evaluate(r,localV);
for(size_t lm=0; lm<lm_tot; lm++)
{
#pragma omp simd aligned(val,local_val)
for(size_t ib=0; ib<myV.size(); ib++)
val[ib]+=Ylm_v[lm]*local_val[ib];
local_val+=Npad;
}
}
/// Evaluate values for many displaced positions that share the same radius
/// r from this center (one row of multi_myV per displacement).  The radial
/// spline is evaluated once; only Ylm is recomputed per displacement.
template<typename DISPL, typename VM>
inline void evaluateValues(const DISPL& Displacements, const int center_idx, const ST& r, VM& multi_myV)
{
if(r<=std::numeric_limits<ST>::epsilon())
Ylm.evaluateV(0,0,1); // direction undefined at the origin
const ST* restrict Ylm_v=Ylm[0];
const size_t m=multi_myV.cols();
constexpr ST czero(0);
std::fill(multi_myV.begin(),multi_myV.end(),czero);
SplineInst->evaluate(r,localV);
for(int ivp=0; ivp<Displacements.size(); ivp++)
{
PointType dr=Displacements[ivp][center_idx];
if(r>std::numeric_limits<ST>::epsilon())
Ylm.evaluateV(-dr[0]/r, -dr[1]/r, -dr[2]/r);
ST* restrict val=multi_myV[ivp];
ST* restrict local_val=localV.data();
for(size_t lm=0; lm<lm_tot; lm++)
{
#pragma omp simd aligned(val,local_val)
for(size_t ib=0; ib<m; ib++)
val[ib]+=Ylm_v[lm]*local_val[ib];
local_val+=Npad;
}
}
}
//evaluate VGL
/// Evaluate value, gradient and Laplacian of all bands at distance r and
/// displacement dr.  Three regimes: r>rmin_sqrt (normal), rmin<r<=rmin_sqrt
/// (near core, divergent terms suppressed), r<=rmin (on top of the ion).
template<typename VV, typename GV>
inline void evaluate_vgl(const ST& r, const PointType& dr, VV& myV, GV& myG, VV& myL)
{
ST drx, dry, drz, rhatx, rhaty, rhatz, rinv;
if (r>rmin)
{
rinv=1.0/r;
drx=dr[0];
dry=dr[1];
drz=dr[2];
rhatx=drx*rinv;
rhaty=dry*rinv;
rhatz=drz*rinv;
}
else
{
// r<=rmin: rhat is never used in that branch below, so leave it unset
rinv=0;
drx=dr[0];
dry=dr[1];
drz=dr[2];
}
Ylm.evaluateVGL(drx, dry, drz);
const ST* restrict Ylm_v=Ylm[0];
const ST* restrict Ylm_gx=Ylm[1];
const ST* restrict Ylm_gy=Ylm[2];
const ST* restrict Ylm_gz=Ylm[3];
ST* restrict g0=myG.data(0);
ST* restrict g1=myG.data(1);
ST* restrict g2=myG.data(2);
constexpr ST czero(0), cone(1), chalf(0.5);
std::fill(myV.begin(),myV.end(),czero);
std::fill(g0,g0+Npad,czero);
std::fill(g1,g1+Npad,czero);
std::fill(g2,g2+Npad,czero);
std::fill(myL.begin(),myL.end(),czero);
ST* restrict val=myV.data();
ST* restrict lapl=myL.data();
ST* restrict local_val=localV.data();
ST* restrict local_grad=localG.data();
ST* restrict local_lapl=localL.data();
SplineInst->evaluate_vgl(r,localV,localG,localL);
if(r>rmin_sqrt)
{
// far from core
// precompute r^{-l} for every (l,m) channel
r_power_minus_l[0]=cone;
ST r_power_temp=cone;
for(int l=1; l<=lmax; l++)
{
r_power_temp*=rinv;
for(int m=-l, lm=l*l; m<=l; m++,lm++)
r_power_minus_l[lm]=r_power_temp;
}
for(size_t lm=0; lm<lm_tot; lm++)
{
const ST& l_val=l_vals[lm];
const ST& r_power=r_power_minus_l[lm];
const ST Ylm_rescale=Ylm_v[lm]*r_power;
const ST rhat_dot_G = ( rhatx*Ylm_gx[lm] + rhaty*Ylm_gy[lm] + rhatz*Ylm_gz[lm] ) * r_power;
#pragma omp simd aligned(val,g0,g1,g2,lapl,local_val,local_grad,local_lapl)
for(size_t ib=0; ib<myV.size(); ib++)
{
const ST local_v=local_val[ib];
const ST local_g=local_grad[ib];
const ST local_l=local_lapl[ib];
// value
const ST Vpart = l_val*rinv*local_v;
val[ib] += Ylm_rescale*local_v;
// grad
const ST factor1 = local_g*Ylm_rescale;
const ST factor2 = local_v*r_power;
const ST factor3 = -Vpart*Ylm_rescale;
g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;
// laplacian
lapl[ib] += (local_l + ( local_g * ( 2 - l_val ) - Vpart ) * rinv) * Ylm_rescale
+ (local_g - Vpart ) * rhat_dot_G;
}
// advance to the next (l,m) channel's padded band block
local_val+=Npad;
local_grad+=Npad;
local_lapl+=Npad;
}
}
else if(r>rmin)
{
// the possibility of reaching here is very very low
std::cout << "Warning: an electron is very close to an ion, distance=" << r << " be careful!" << std::endl;
// near core, kill divergence in the laplacian
r_power_minus_l[0]=cone;
ST r_power_temp=cone;
for(int l=1; l<=lmax; l++)
{
r_power_temp*=rinv;
for(int m=-l, lm=l*l; m<=l; m++,lm++)
r_power_minus_l[lm]=r_power_temp;
}
for(size_t lm=0; lm<lm_tot; lm++)
{
const ST& l_val=l_vals[lm];
const ST& r_power=r_power_minus_l[lm];
const ST Ylm_rescale=Ylm_v[lm]*r_power;
const ST rhat_dot_G = (Ylm_gx[lm] * rhatx + Ylm_gy[lm] * rhaty + Ylm_gz[lm] * rhatz ) * r_power * r;
#pragma omp simd aligned(val,g0,g1,g2,lapl,local_val,local_grad,local_lapl)
for(size_t ib=0; ib<myV.size(); ib++)
{
const ST local_v=local_val[ib];
const ST local_g=local_grad[ib];
const ST local_l=local_lapl[ib];
// value
const ST Vpart = Ylm_rescale*local_v;
val[ib] += Vpart;
// grad
const ST factor1 = local_g*Ylm_rescale;
const ST factor2 = local_v*r_power;
const ST factor3 = -l_val*Vpart*rinv;
g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;
// laplacian
lapl[ib] += local_l * (cone - chalf *l_val) * ( 3 * Ylm_rescale + rhat_dot_G );
}
local_val+=Npad;
local_grad+=Npad;
local_lapl+=Npad;
}
}
else
{
std::cout << "Warning: an electron is on top of an ion!" << std::endl;
// strictly zero
// only the lm=0 (s) channel contributes to value/laplacian at r=0
#pragma omp simd aligned(val,lapl,local_val,local_lapl)
for(size_t ib=0; ib<myV.size(); ib++)
{
// value
val[ib] = Ylm_v[0]*local_val[ib];
// laplacian
lapl[ib] = local_lapl[ib] * static_cast<ST>(3) * Ylm_v[0];
}
local_val+=Npad;
local_grad+=Npad;
local_lapl+=Npad;
// NOTE(review): guard looks like it should require the three l=1
// channels (lm=1..3) to exist, i.e. lm_tot>=4 rather than lm_tot>0 —
// confirm against how lmax=0 centers are constructed.
if(lm_tot>0)
{
//std::cout << std::endl;
for(size_t lm=1; lm<4; lm++)
{
#pragma omp simd aligned(g0,g1,g2,local_grad)
for(size_t ib=0; ib<myV.size(); ib++)
{
const ST local_g=local_grad[ib];
// grad
g0[ib] += local_g * Ylm_gx[lm];
g1[ib] += local_g * Ylm_gy[lm];
g2[ib] += local_g * Ylm_gz[lm];
}
local_grad+=Npad;
}
}
}
}
/// Value/gradient/Hessian evaluation is not implemented for atomic orbitals.
template<typename VV, typename GV, typename HT>
void evaluate_vgh(const ST& r, const PointType& dr, VV& myV, GV& myG, HT& myH)
{
//Needed to do tensor product here
APP_ABORT("AtomicOrbitalSoA::evaluate_vgh");
}
};
/** adoptor class to match
*
*/
/** Base class mixed into spline adoptors to add per-atom correction regions.
 *
 * Owns a set of AtomicOrbitalSoA centers; for an electron inside a center's
 * cutoff it evaluates the atomic representation and returns a smoothing
 * weight (via smooth_function), otherwise it returns -1 so the caller uses
 * the plain spline representation.
 */
template<typename ST>
struct HybridAdoptorBase
{
static const int D=3;
using PointType=typename AtomicOrbitalSoA<ST>::PointType;
using RealType=typename DistanceTableData::RealType;
// atomic centers
std::vector<AtomicOrbitalSoA<ST> > AtomicCenters;
///table index
int myTableID;
//mapping supercell to primitive cell
std::vector<int> Super2Prim;
// r, dr for distance table
RealType dist_r;
DistanceTableData::PosType dist_dr;
// for APBC
PointType r_image;
// smooth function derivatives
RealType df_dr, d2f_dr2;
HybridAdoptorBase() { }
/// Register the ion-electron distance table and the supercell-to-primitive
/// cell index mapping.
void set_info(const ParticleSet& ions, ParticleSet& els, const std::vector<int>& mapping)
{
myTableID=els.addTable(ions,DT_SOA);
Super2Prim=mapping;
}
/// Resize per-band storage of every atomic center.
inline void resizeStorage(size_t Nb)
{
for(int ic=0; ic<AtomicCenters.size(); ic++)
AtomicCenters[ic].resizeStorage(Nb);
}
/// Broadcast every center's spline table across MPI ranks.
void bcast_tables(Communicate* comm)
{
for(int ic=0; ic<AtomicCenters.size(); ic++)
AtomicCenters[ic].bcast_tables(comm);
}
/// Gather every center's spline table; no-op for a single-rank communicator.
void gather_atomic_tables(Communicate* comm, std::vector<int> &offset)
{
if(comm->size()==1) return;
for(int ic=0; ic<AtomicCenters.size(); ic++)
AtomicCenters[ic].gather_tables(comm, offset);
}
/// Zero all centers' spline coefficients.
inline void flush_zero()
{
for(int ic=0; ic<AtomicCenters.size(); ic++)
AtomicCenters[ic].flush_zero();
}
/// Read all centers' splines from the "atomic_centers" HDF5 group; fails if
/// the stored center count differs from ours.
bool read_splines(hdf_archive& h5f)
{
bool success=true;
size_t ncenter;
success = success && h5f.push("atomic_centers",false);
success = success && h5f.read(ncenter,"number_of_centers");
if(!success) return success;
if(ncenter!=AtomicCenters.size()) success=false;
// read splines of each center
for(int ic=0; ic<AtomicCenters.size(); ic++)
{
std::ostringstream gname;
gname << "center_" << ic;
success = success && h5f.push(gname.str().c_str(),false);
success = success && AtomicCenters[ic].read_splines(h5f);
h5f.pop();
}
h5f.pop();
return success;
}
/// Write all centers' splines to the "atomic_centers" HDF5 group.
bool write_splines(hdf_archive& h5f)
{
bool success=true;
int ncenter=AtomicCenters.size();
success = success && h5f.push("atomic_centers",true);
success = success && h5f.write(ncenter,"number_of_centers");
// write splines of each center
for(int ic=0; ic<AtomicCenters.size(); ic++)
{
std::ostringstream gname;
gname << "center_" << ic;
success = success && h5f.push(gname.str().c_str(),true);
success = success && AtomicCenters[ic].write_splines(h5f);
h5f.pop();
}
h5f.pop();
return success;
}
/// Boundary-condition sign for position r, using r_image set by the most
/// recent evaluate_* call.  Sums HalfG[i] times the rounded fractional
/// image shift along each lattice direction.
template<typename Cell>
inline int get_bc_sign(const PointType& r, const Cell& PrimLattice, TinyVector<int,D>& HalfG)
{
int bc_sign=0;
PointType shift_unit = PrimLattice.toUnit(r-r_image);
for(int i=0; i<D; i++)
{
ST img = round(shift_unit[i]);
bc_sign += HalfG[i] * (int)img;
}
return bc_sign;
}
//evaluate only V
/// If electron iat is within its nearest center's cutoff, evaluate atomic
/// values into myV and return the smoothing weight; otherwise return -1.
/// Side effects: updates dist_r, dist_dr and r_image.
template<typename VV>
inline RealType evaluate_v(const ParticleSet& P, const int iat, VV& myV)
{
const auto* ei_dist=P.DistTables[myTableID];
const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
if(center_idx<0) abort();
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
if ( dist_r < myCenter.cutoff )
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image=myCenter.pos+dr;
myCenter.evaluate_v(dist_r, dr, myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
/* check if the batched algorithm is safe to operate
 * @param VP virtual particle set
 * @return true if it is safe
 *
 * When the reference electron in the NLPP evaluation has a distance larger than the non-overlapping radius of the reference center,
 * some quadrature points may get their SPOs evaluated from the nearest center, which is not the reference center.
 * The batched algorithm forces the evaluation on the reference center and introduces some error.
 * In this case, the non-batched algorithm should be used.
 */
bool is_batched_safe(const VirtualParticleSet& VP)
{
const int center_idx=VP.refSourcePtcl;
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
return VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx] < myCenter.non_overlapping_radius;
}
// C2C, C2R cases
/// Batched value evaluation for all quadrature points of VP against the
/// reference center; returns the smoothing weight or -1 if outside cutoff.
template<typename VM>
inline RealType evaluateValuesC2X(const VirtualParticleSet& VP, VM& multi_myV)
{
const int center_idx=VP.refSourcePtcl;
dist_r = VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx];
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
if ( dist_r < myCenter.cutoff )
{
myCenter.evaluateValues(VP.DistTables[myTableID]->Displacements, center_idx, dist_r, multi_myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
// R2R case
/// Same as evaluateValuesC2X but also fills the per-point boundary-condition
/// signs needed by the real-to-real spline path.
template<typename VM, typename Cell, typename SV>
inline RealType evaluateValuesR2R(const VirtualParticleSet& VP,
const Cell& PrimLattice, TinyVector<int,D>& HalfG,
VM& multi_myV, SV& bc_signs)
{
const int center_idx=VP.refSourcePtcl;
dist_r = VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx];
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
if ( dist_r < myCenter.cutoff )
{
const auto &displ=VP.DistTables[myTableID]->Displacements;
for(int ivp=0; ivp<VP.getTotalNum(); ivp++)
{
r_image=myCenter.pos-displ[ivp][center_idx];
bc_signs[ivp]=get_bc_sign(VP.R[ivp], PrimLattice, HalfG);; // NOTE(review): stray second ';' is harmless
}
myCenter.evaluateValues(displ, center_idx, dist_r, multi_myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
//evaluate only VGL
/// Like evaluate_v but also fills gradients (myG) and Laplacians (myL).
template<typename VV, typename GV>
inline RealType evaluate_vgl(const ParticleSet& P, const int iat, VV& myV, GV& myG, VV& myL)
{
const auto* ei_dist=P.DistTables[myTableID];
const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
if(center_idx<0) abort();
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
if ( dist_r < myCenter.cutoff )
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image=myCenter.pos+dr;
myCenter.evaluate_vgl(dist_r, dr, myV, myG, myL);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
//evaluate only VGH
/// Like evaluate_v but requests the Hessian; the atomic side currently
/// aborts in evaluate_vgh (not implemented).
template<typename VV, typename GV, typename HT>
inline RealType evaluate_vgh(const ParticleSet& P, const int iat, VV& myV, GV& myG, HT& myH)
{
const auto* ei_dist=P.DistTables[myTableID];
const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
if(center_idx<0) abort();
auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
if ( dist_r < myCenter.cutoff )
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image=myCenter.pos+dr;
myCenter.evaluate_vgh(dist_r, dr, myV, myG, myH);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
/// Smoothing weight f(r)=0.5*(1-tanh(x)), where x maps the interval
/// [cutoff_buffer, cutoff] linearly onto [-1,1]; returns 1 below
/// cutoff_buffer.  Side effect: stores df/dr in df_dr and d2f/dr2 in
/// d2f_dr2.
/// NOTE(review): the early return for r<cutoff_buffer leaves df_dr/d2f_dr2
/// stale rather than setting them to 0 — confirm callers ignore the
/// derivatives when f==1.
inline RealType smooth_function(const ST &cutoff_buffer, const ST &cutoff, RealType r)
{
const RealType cone(1), ctwo(2), chalf(0.5);
if (r<cutoff_buffer) return cone;
const RealType scale=ctwo/(cutoff-cutoff_buffer);
const RealType x=(r-cutoff_buffer)*scale-cone;
const RealType cosh_x=std::cosh(x);
const RealType tanh_x=std::tanh(x);
df_dr=-chalf/(cosh_x*cosh_x)*scale;
d2f_dr2=-ctwo*tanh_x*df_dr*scale;
return chalf*(cone-tanh_x);
}
};
}
#endif
|
normal.c | /* =============================================================================
*
* normal.c
* -- Implementation of normal k-means clustering algorithm
*
* =============================================================================
*
* Author:
*
* Wei-keng Liao
* ECE Department, Northwestern University
* email: wkliao@ece.northwestern.edu
*
*
* Edited by:
*
* Jay Pisharath
* Northwestern University.
*
* Chi Cao Minh
* Stanford University
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include "normal.h"
#include "random.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "util.h"
/* accumulated wall-clock seconds spent in the clustering loop (all calls) */
double global_time = 0.0;
/* argument bundle handed to every worker thread */
typedef struct args {
double** feature;      /* in: [npoints][nfeatures] feature matrix */
int nfeatures;
int npoints;
int nclusters;
int* membership;       /* out: [npoints] cluster index per point */
double** clusters;     /* current centers [nclusters][nfeatures] */
int** new_centers_len; /* per-cluster count of points accumulated */
double** new_centers;  /* per-cluster feature sums for the next centers */
} args_t;
/* number of membership changes in the current iteration (TM-shared) */
double global_delta;
long global_i; /* index into task queue */
#define CHUNK 3 /* points claimed from the task queue at a time */
/* =============================================================================
* work
* =============================================================================
*/
/* Worker thread body.  Claims CHUNK-sized ranges of points from the shared
 * task queue (global_i), assigns each point to its nearest cluster, and
 * accumulates the new center sums/counts inside transactions.  The thread's
 * local membership-change count is folded into global_delta at the end. */
static void
work (void* argPtr)
{
TM_THREAD_ENTER();
args_t* args = (args_t*)argPtr;
double** feature = args->feature;
int nfeatures = args->nfeatures;
int npoints = args->npoints;
int nclusters = args->nclusters;
int* membership = args->membership;
double** clusters = args->clusters;
int** new_centers_len = args->new_centers_len;
double** new_centers = args->new_centers;
double delta = 0.0;
int index;
int i;
int j;
int start;
int stop;
int myId;
myId = thread_getId();
/* the first nthreads chunks are statically assigned by thread id;
 * global_i was seeded to nthreads*CHUNK by normal_exec accordingly */
start = myId * CHUNK;
while (start < npoints) {
stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints);
for (i = start; i < stop; i++) {
index = common_findNearestPoint(feature[i],
nfeatures,
clusters,
nclusters);
/*
* If membership changes, increase delta by 1.
* membership[i] cannot be changed by other threads
*/
if (membership[i] != index) {
delta += 1.0;
}
/* Assign the membership to object i */
/* membership[i] can't be changed by other thread */
membership[i] = index;
/* Update new cluster centers : sum of objects located within */
TM_BEGIN();
TM_SHARED_WRITE(*new_centers_len[index],
TM_SHARED_READ(*new_centers_len[index]) + 1);
for (j = 0; j < nfeatures; j++) {
TM_SHARED_WRITE_D(
new_centers[index][j],
(TM_SHARED_READ_D(new_centers[index][j]) + feature[i][j])
);
}
TM_END();
}
/* Update task queue */
if (start + CHUNK < npoints) {
TM_BEGIN();
start = (int)TM_SHARED_READ(global_i);
TM_SHARED_WRITE(global_i, (start + CHUNK));
TM_END();
} else {
break;
}
}
/* fold this thread's local change count into the global delta */
TM_BEGIN();
TM_SHARED_WRITE_D(global_delta, TM_SHARED_READ_D(global_delta) + delta);
TM_END();
TM_THREAD_EXIT();
}
/* =============================================================================
* normal_exec
* =============================================================================
*/
/* =============================================================================
 * normal_exec
 * -- Runs k-means until the fraction of points changing membership drops to
 *    threshold or below, or 500 iterations have elapsed.
 * -- Returns the cluster centers as a [nclusters][nfeatures] array whose
 *    payload is one contiguous allocation anchored at clusters[0]; the
 *    caller owns it (free(clusters[0]); free(clusters)).
 * =============================================================================
 */
double**
normal_exec (int nthreads,
             double** feature,    /* in: [npoints][nfeatures] */
             int nfeatures,
             int npoints,
             int nclusters,
             double threshold,
             int* membership,     /* out: [npoints] */
             random_t* randomPtr)
{
    int i;
    int j;
    int loop = 0;
    int** new_centers_len; /* [nclusters]: no. of points in each cluster */
    double delta;
    double** clusters;     /* out: [nclusters][nfeatures] */
    double** new_centers;  /* [nclusters][nfeatures] */
    void* alloc_memory = NULL;
    args_t args;
    TIMER_T start;
    TIMER_T stop;

    /* Allocate space for returning variable clusters[] */
    clusters = (double**)malloc(nclusters * sizeof(double*));
    assert(clusters);
    clusters[0] = (double*)malloc(nclusters * nfeatures * sizeof(double));
    assert(clusters[0]);
    for (i = 1; i < nclusters; i++) {
        clusters[i] = clusters[i-1] + nfeatures;
    }

    /* Randomly pick cluster centers */
    for (i = 0; i < nclusters; i++) {
        int n = (int)(random_generate(randomPtr) % npoints);
        for (j = 0; j < nfeatures; j++) {
            clusters[i][j] = feature[n][j];
        }
    }

    /* no point is assigned yet */
    for (i = 0; i < npoints; i++) {
        membership[i] = -1;
    }

    /*
     * Need to initialize new_centers_len and new_centers[0] to all 0.
     * Allocate clusters on different cache lines to reduce false sharing.
     */
    {
        int cluster_size = sizeof(int) + sizeof(double) * nfeatures;
        const int cacheLineSize = 32;
        cluster_size += (cacheLineSize-1) - ((cluster_size-1) % cacheLineSize);
        alloc_memory = malloc(nclusters * cluster_size);
        new_centers_len = (int**) malloc(nclusters * sizeof(int*));
        new_centers = (double**) malloc(nclusters * sizeof(double*));
        /* check BEFORE touching the block: memset(NULL, ...) is undefined
         * behavior (the original memset preceded the assert) */
        assert(alloc_memory && new_centers && new_centers_len);
        memset(alloc_memory, 0, nclusters * cluster_size);
        for (i = 0; i < nclusters; i++) {
            new_centers_len[i] = (int*)((char*)alloc_memory + cluster_size * i);
            new_centers[i] = (double*)((char*)alloc_memory + cluster_size * i + sizeof(int));
        }
    }

    TIMER_READ(start);
    GOTO_SIM();
    do {
        delta = 0.0;
        args.feature         = feature;
        args.nfeatures       = nfeatures;
        args.npoints         = npoints;
        args.nclusters       = nclusters;
        args.membership      = membership;
        args.clusters        = clusters;
        args.new_centers_len = new_centers_len;
        args.new_centers     = new_centers;
        global_i = nthreads * CHUNK; /* first nthreads chunks are pre-assigned */
        global_delta = delta;
#ifdef OTM
#pragma omp parallel
        {
            work(&args);
        }
#else
        thread_start(work, &args);
#endif
        delta = global_delta;
        /* Replace old cluster centers with new_centers */
        for (i = 0; i < nclusters; i++) {
            for (j = 0; j < nfeatures; j++) {
                /* BUGFIX: dereference the count -- the original tested the
                 * pointer (new_centers_len[i] > 0), which is always true,
                 * causing a division by zero for empty clusters */
                if (*new_centers_len[i] > 0) {
                    clusters[i][j] = new_centers[i][j] / *new_centers_len[i];
                }
                new_centers[i][j] = 0.0; /* set back to 0 */
            }
            *new_centers_len[i] = 0; /* set back to 0 */
        }
        delta /= npoints;
    } while ((delta > threshold) && (loop++ < 500));
    GOTO_REAL();
    TIMER_READ(stop);
    global_time += TIMER_DIFF_SECONDS(start, stop);

    free(alloc_memory);
    free(new_centers);
    free(new_centers_len);

    return clusters;
}
/* =============================================================================
*
* End of normal.c
*
* =============================================================================
*/
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta(non-feature) data for training data,
* e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, the weight of each record, optional.
* 3. Query Boundaries, necessary for lambdarank.
* The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, calculated automatically from weights and query_boundaries (if both exist);
* the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
*/
void Init(const char* data_filename);
/*!
* \brief init as subset
* \param metadata Filename of data
* \param used_indices
* \param num_used_indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initial with binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
* \param num_data Number of training data
* \param weight_idx Index of weight column, < 0 means doesn't exists
* \param query_idx Index of query id column, < 0 means doesn't exists
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if need
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param file File want to write
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights, if not exists, will return nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries, if not exists, will return nullptr
* we assume data will order by query,
* the interval of [query_boundaris[i], query_boundaris[i+1])
* is the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries, if not exists, will return nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores, if not exists, will return nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore();
/*! \brief Load wights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query wights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of querys */
data_size_t num_queries_;
/*! \brief Number of Initial score, used to check correct weight file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief mutex for threading safe call */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
* \param out_label Label will store to this if exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
/*! \brief Number of features in the parsed data */
virtual int NumFeatures() const = 0;
/*!
* \brief Create an object of parser, will auto choose the format depend on file
* \param filename One Filename of data
* \param header Whether the data file contains a header line
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
struct TrainingShareStates {
    int num_threads = 0;
    bool is_colwise = true;
    bool is_use_subcol = false;
    bool is_use_subrow = false;
    bool is_subrow_copied = false;
    bool is_constant_hessian = true;
    const data_size_t* bagging_use_indices;
    data_size_t bagging_indices_cnt;
    int num_bin_aligned;
    std::unique_ptr<MultiValBin> multi_val_bin;
    std::unique_ptr<MultiValBin> multi_val_bin_subset;
    std::vector<uint32_t> hist_move_src;
    std::vector<uint32_t> hist_move_dest;
    std::vector<uint32_t> hist_move_size;
    std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>
        hist_buf;
    /*! \brief Take ownership of bin and grow hist_buf so each thread has an
     *  aligned region of two entries per bin; never shrinks the buffer */
    void SetMultiValBin(MultiValBin* bin) {
        if (bin == nullptr) {
            return;
        }
        multi_val_bin.reset(bin);
        num_threads = OMP_NUM_THREADS();
        // round the bin count up to the alignment unit
        num_bin_aligned =
            (bin->num_bin() + kAlignedSize - 1) / kAlignedSize * kAlignedSize;
        const size_t needed = static_cast<size_t>(num_bin_aligned) * 2 * num_threads;
        if (needed > hist_buf.size()) {
            hist_buf.resize(needed);
        }
    }
    /*! \brief Scratch histogram region at the tail of hist_buf; only used
     *  when training on a column subset, otherwise nullptr */
    hist_t* TempBuf() {
        if (!is_use_subcol) {
            return nullptr;
        }
        return hist_buf.data() + hist_buf.size() - num_bin_aligned * 2;
    }
    /*! \brief Scatter histogram slices from src into their destination
     *  offsets in dest; no-op unless a column subset is in use */
    void HistMove(const hist_t* src, hist_t* dest) {
        if (!is_use_subcol) {
            return;
        }
#pragma omp parallel for schedule(static)
        for (int idx = 0; idx < static_cast<int>(hist_move_src.size()); ++idx) {
            std::copy_n(src + hist_move_src[idx], hist_move_size[idx],
                        dest + hist_move_dest[idx]);
        }
    }
};
/*! \brief The main class of data set,
* which are used to training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
/*! \brief Construct the dataset's feature groups from sampled column values and bin mappers */
void Construct(
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
int num_total_features,
const std::vector<std::vector<double>>& forced_bins,
int** sample_non_zero_indices,
double** sample_values,
const int* num_per_col,
int num_sample_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
/*! \brief Check whether another dataset has the same feature count, label column and bin layout */
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
/*! \brief Push an explicit zero for every zero-pushing feature that was absent in this row */
inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool>& is_feature_added) {
if (is_finish_load_) { return; }
for (auto fidx : feature_need_push_zeros_) {
if (is_feature_added[fidx]) { continue; }
const int group = feature2group_[fidx];
const int sub_feature = feature2subfeature_[fidx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f);
}
}
/*! \brief Push one dense row (one value per original column) into the feature groups */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
/*! \brief Push one sparse row ((column_idx, value) pairs), then backfill zeros via FinishOneRow */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
std::vector<bool> is_feature_added(num_features_, false);
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
is_feature_added[feature_idx] = true;
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
FinishOneRow(tid, row_idx, is_feature_added);
}
/*! \brief Push a single value directly into a (group, sub_feature) slot */
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
/*! \brief Map inner (used) feature index to its original column index */
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
/*! \brief Map original column index to inner feature index (< 0 if the column is unused) */
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
/*! \brief Group that holds the given inner feature */
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
/*! \brief Sub-feature index of a feature inside its group (name typo kept for API compatibility) */
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
/*! \brief First bin index of the given group in the global bin space */
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
/*! \brief Total number of bins over all groups */
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
/*! \brief Original column indices of all used features */
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
/*! \brief Copy a subset of rows (and optionally metadata) from fullset into this dataset */
void CopySubrow(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
MultiValBin* GetMultiBinFromSparseFeatures() const;
MultiValBin* GetMultiBinFromAllFeatures() const;
TrainingShareStates* GetShareStates(
score_t* gradients, score_t* hessians,
const std::vector<int8_t>& is_feature_used, bool is_constant_hessian,
bool force_colwise, bool force_rowwise) const;
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void InitTrain(const std::vector<int8_t>& is_feature_used,
TrainingShareStates* share_state) const;
template <bool USE_INDICES, bool USE_HESSIAN>
void ConstructHistogramsInner(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices,
data_size_t num_data, const score_t* gradients,
const score_t* hessians,
score_t* ordered_gradients,
score_t* ordered_hessians,
TrainingShareStates* share_state,
hist_t* hist_data) const;
template <bool USE_INDICES, bool ORDERED>
void ConstructHistogramsMultiVal(const data_size_t* data_indices,
data_size_t num_data,
const score_t* gradients,
const score_t* hessians,
TrainingShareStates* share_state,
hist_t* hist_data) const;
/*! \brief Dispatch histogram construction to the right template instantiation,
* depending on whether an index subset and per-record hessians are needed */
inline void ConstructHistograms(
const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
TrainingShareStates* share_state, hist_t* hist_data) const {
if (num_data <= 0) {
return;
}
// gather by index only when a strict subset of rows is requested
bool use_indices = data_indices != nullptr && (num_data < num_data_);
if (share_state->is_constant_hessian) {
if (use_indices) {
ConstructHistogramsInner<true, false>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
} else {
ConstructHistogramsInner<false, false>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
}
} else {
if (use_indices) {
ConstructHistogramsInner<true, true>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
} else {
ConstructHistogramsInner<false, true>(
is_feature_used, data_indices, num_data, gradients, hessians,
ordered_gradients, ordered_hessians, share_state, hist_data);
}
}
}
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, hist_t* data) const;
/*! \brief Partition data_indices into lte/gt sides for a split on the given feature */
inline data_size_t Split(int feature, const uint32_t* threshold,
int num_threshold, bool default_left,
const data_size_t* data_indices,
data_size_t cnt, data_size_t* lte_indices,
data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(
sub_feature, threshold, num_threshold, default_left, data_indices,
cnt, lte_indices, gt_indices);
}
/*! \brief Bin offset of a feature in its group: 1 for the first sub-feature, else 0 */
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
/*! \brief Number of bins of feature i */
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
/*! \brief Total number of bins of a feature group */
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
/*! \brief Bin mapper of feature i */
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
/*! \brief Raw bin data of a feature group */
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
/*! \brief Iterator over the bins of feature i */
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
/*! \brief Iterator over all bins of a feature group */
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
/*! \brief Whether the group stores multiple values per record */
inline bool IsMultiGroup(int i) const {
return feature_groups_[i]->is_multi_val_;
}
/*! \brief Convert a bin index of feature i back to a real threshold value */
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
/*! \brief Set and validate feature names: count must match total features,
* names must be JSON-safe and unique; spaces are replaced with underscores */
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
std::unordered_set<std::string> feature_name_set;
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
// check json
if (!Common::CheckAllowedJSON(feature_name)) {
Log::Fatal("Do not support special JSON characters in feature name.");
}
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
if (feature_name_set.count(feature_name) > 0) {
Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
}
feature_name_set.insert(feature_name);
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
/*! \brief Bin info string per original column ("none" for unused columns) */
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; ++i) {
int fidx = used_feature_map_[i];
if (fidx < 0) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info_string());
}
}
return bufs;
}
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
/*! \brief Append the features of another dataset to this one */
void AddFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief token used to recognize the binary dataset file format */
static const char* binary_file_token;
/*! \brief Number of feature groups */
int num_groups_;
/*! \brief Mapper from inner feature index to original column index */
std::vector<int> real_feature_idx_;
/*! \brief Group index of each inner feature */
std::vector<int> feature2group_;
/*! \brief Sub-feature index of each inner feature inside its group */
std::vector<int> feature2subfeature_;
/*! \brief Cumulative bin boundaries of the groups */
std::vector<uint64_t> group_bin_boundaries_;
/*! \brief First inner feature index of each group */
std::vector<int> group_feature_start_;
/*! \brief Number of features in each group */
std::vector<int> group_feature_cnt_;
/*! \brief Whether loading has finished; push operations become no-ops afterwards */
bool is_finish_load_;
int max_bin_;
std::vector<int32_t> max_bin_by_feature_;
std::vector<std::vector<double>> forced_bin_bounds_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
/*! \brief Features that need an explicit zero pushed when absent from a sparse row */
std::vector<int> feature_need_push_zeros_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
ast-dump-openmp-begin-declare-variant_13.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int also_before(void) { // base definition; superseded by the variant declared below
return 1;
}
#pragma omp begin declare variant match(user = {condition(1)})
int also_after(void) { // variant of the later base also_after; selected since condition(1) holds
return 0;
}
int also_before(void) { // variant of also_before; selected since condition(1) holds
return 0;
}
#pragma omp end declare variant
int also_after(void) { // base definition; the variant above replaces it at call sites
return 2;
}
int test(void) {
// Should return 0.
return also_after() + also_before();
}
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[user={condition(...)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[user={condition(...)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit user={condition(1)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:16, line:25:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-PseudoObjectExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_27:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_29:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_30:0x[a-z0-9]*]] <line:10:1, line:24:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[user={condition(...)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_32:0x[a-z0-9]*]] <line:24:25, col:37> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_33:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_36:0x[a-z0-9]*]] <line:13:1, line:24:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6]] 'also_before[user={condition(...)}]' 'int ({{.*}})'
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
    kernel_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);
    for (int q = 0; q + 3 < outch; q += 4)
    {
        // one source row pointer per output-channel lane b
        const float* rows[4];
        for (int b = 0; b < 4; b++)
        {
            rows[b] = (const float*)kernel + (q + b) * inch;
        }
        float* g0 = kernel_pack4.channel(q / 4);
        for (int p = 0; p + 3 < inch; p += 4)
        {
            // write a 4x4 tile: g0[a*4 + b] = weight(outch q+b, inch p+a)
            for (int a = 0; a < 4; a++)
            {
                for (int b = 0; b < 4; b++)
                {
                    g0[a * 4 + b] = rows[b][a];
                }
            }
            for (int b = 0; b < 4; b++)
            {
                rows[b] += 4;
            }
            g0 += 16;
        }
    }
}
// 1x1 stride-1 convolution on pack-4 data, computed as a small GEMM:
// input pixels are re-tiled into blocks of 4/2/1, then each output channel
// accumulates a 4x4 weight tile per input channel over those blocks.
static void conv1x1s1_sgemm_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave: repack pixels into tiles of 4, then 2, then 1, so the GEMM
// inner loops below read each tile contiguously
Mat tmp(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size;
int remain_size_start;
remain_size_start = 0;
nn_size = (size - remain_size_start) >> 2;
// tiles of 4 pixels
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 4);
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(img0);
__m128 _r1 = _mm_loadu_ps(img0 + 4);
__m128 _r2 = _mm_loadu_ps(img0 + 8);
__m128 _r3 = _mm_loadu_ps(img0 + 12);
_mm_storeu_ps(tmpptr, _r0);
_mm_storeu_ps(tmpptr + 4, _r1);
_mm_storeu_ps(tmpptr + 8, _r2);
_mm_storeu_ps(tmpptr + 12, _r3);
tmpptr += 16;
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
// tiles of 2 pixels
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(img0);
__m128 _r1 = _mm_loadu_ps(img0 + 4);
_mm_storeu_ps(tmpptr, _r0);
_mm_storeu_ps(tmpptr + 4, _r1);
tmpptr += 8;
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
// remaining single pixels
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(img0);
_mm_storeu_ps(tmpptr, _r0);
tmpptr += 4;
img0 += bottom_blob.cstep * 4;
}
}
}
// GEMM: one output channel per iteration, accumulating over inch
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
// 4 pixels at a time
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 4);
const float* kptr0 = (const float*)kernel.channel(p);
__m128 _sum0 = _mm_loadu_ps(biasptr);
__m128 _sum1 = _mm_loadu_ps(biasptr);
__m128 _sum2 = _mm_loadu_ps(biasptr);
__m128 _sum3 = _mm_loadu_ps(biasptr);
for (int q = 0; q < inch; q++)
{
// broadcast each of the 4 lanes of each of the 4 pixels
__m128 _val00 = _mm_load1_ps(tmpptr);
__m128 _val01 = _mm_load1_ps(tmpptr + 1);
__m128 _val02 = _mm_load1_ps(tmpptr + 2);
__m128 _val03 = _mm_load1_ps(tmpptr + 3);
__m128 _val10 = _mm_load1_ps(tmpptr + 4);
__m128 _val11 = _mm_load1_ps(tmpptr + 5);
__m128 _val12 = _mm_load1_ps(tmpptr + 6);
__m128 _val13 = _mm_load1_ps(tmpptr + 7);
__m128 _val20 = _mm_load1_ps(tmpptr + 8);
__m128 _val21 = _mm_load1_ps(tmpptr + 9);
__m128 _val22 = _mm_load1_ps(tmpptr + 10);
__m128 _val23 = _mm_load1_ps(tmpptr + 11);
__m128 _val30 = _mm_load1_ps(tmpptr + 12);
__m128 _val31 = _mm_load1_ps(tmpptr + 13);
__m128 _val32 = _mm_load1_ps(tmpptr + 14);
__m128 _val33 = _mm_load1_ps(tmpptr + 15);
// 4x4 weight tile for this input channel
__m128 _w0 = _mm_load_ps(kptr0);
__m128 _w1 = _mm_load_ps(kptr0 + 4);
__m128 _w2 = _mm_load_ps(kptr0 + 8);
__m128 _w3 = _mm_load_ps(kptr0 + 12);
// fused multiply-add when the AVX wrapper is available
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
_sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);
_sum2 = _mm_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm_comp_fmadd_ps(_w3, _val23, _sum2);
_sum3 = _mm_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm_comp_fmadd_ps(_w3, _val33, _sum3);
#else
_sum0 = _mm_add_ps(_mm_mul_ps(_w0, _val00), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w1, _val01), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w2, _val02), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w3, _val03), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_w0, _val10), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w1, _val11), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w2, _val12), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w3, _val13), _sum1);
_sum2 = _mm_add_ps(_mm_mul_ps(_w0, _val20), _sum2);
_sum2 = _mm_add_ps(_mm_mul_ps(_w1, _val21), _sum2);
_sum2 = _mm_add_ps(_mm_mul_ps(_w2, _val22), _sum2);
_sum2 = _mm_add_ps(_mm_mul_ps(_w3, _val23), _sum2);
_sum3 = _mm_add_ps(_mm_mul_ps(_w0, _val30), _sum3);
_sum3 = _mm_add_ps(_mm_mul_ps(_w1, _val31), _sum3);
_sum3 = _mm_add_ps(_mm_mul_ps(_w2, _val32), _sum3);
_sum3 = _mm_add_ps(_mm_mul_ps(_w3, _val33), _sum3);
#endif
tmpptr += 16;
kptr0 += 16;
}
_mm_store_ps(outptr0, _sum0);
_mm_store_ps(outptr0 + 4, _sum1);
_mm_store_ps(outptr0 + 8, _sum2);
_mm_store_ps(outptr0 + 12, _sum3);
outptr0 += 16;
}
// 2 pixels at a time
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
const float* kptr0 = (const float*)kernel.channel(p);
__m128 _sum0 = _mm_loadu_ps(biasptr);
__m128 _sum1 = _mm_loadu_ps(biasptr);
for (int q = 0; q < inch; q++)
{
__m128 _val00 = _mm_load1_ps(tmpptr);
__m128 _val01 = _mm_load1_ps(tmpptr + 1);
__m128 _val02 = _mm_load1_ps(tmpptr + 2);
__m128 _val03 = _mm_load1_ps(tmpptr + 3);
__m128 _val10 = _mm_load1_ps(tmpptr + 4);
__m128 _val11 = _mm_load1_ps(tmpptr + 5);
__m128 _val12 = _mm_load1_ps(tmpptr + 6);
__m128 _val13 = _mm_load1_ps(tmpptr + 7);
__m128 _w0 = _mm_load_ps(kptr0);
__m128 _w1 = _mm_load_ps(kptr0 + 4);
__m128 _w2 = _mm_load_ps(kptr0 + 8);
__m128 _w3 = _mm_load_ps(kptr0 + 12);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
_sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);
#else
_sum0 = _mm_add_ps(_mm_mul_ps(_w0, _val00), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w1, _val01), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w2, _val02), _sum0);
_sum0 = _mm_add_ps(_mm_mul_ps(_w3, _val03), _sum0);
_sum1 = _mm_add_ps(_mm_mul_ps(_w0, _val10), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w1, _val11), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w2, _val12), _sum1);
_sum1 = _mm_add_ps(_mm_mul_ps(_w3, _val13), _sum1);
#endif
tmpptr += 8;
kptr0 += 16;
}
_mm_store_ps(outptr0, _sum0);
_mm_store_ps(outptr0 + 4, _sum1);
outptr0 += 8;
}
// remaining single pixels
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = (const float*)kernel.channel(p);
__m128 _sum = _mm_loadu_ps(biasptr);
for (int q = 0; q < inch; q++)
{
__m128 _val0 = _mm_load1_ps(tmpptr);
__m128 _val1 = _mm_load1_ps(tmpptr + 1);
__m128 _val2 = _mm_load1_ps(tmpptr + 2);
__m128 _val3 = _mm_load1_ps(tmpptr + 3);
__m128 _w0 = _mm_load_ps(kptr0);
__m128 _w1 = _mm_load_ps(kptr0 + 4);
__m128 _w2 = _mm_load_ps(kptr0 + 8);
__m128 _w3 = _mm_load_ps(kptr0 + 12);
#if __AVX__
_sum = _mm_comp_fmadd_ps(_w0, _val0, _sum);
_sum = _mm_comp_fmadd_ps(_w1, _val1, _sum);
_sum = _mm_comp_fmadd_ps(_w2, _val2, _sum);
_sum = _mm_comp_fmadd_ps(_w3, _val3, _sum);
#else
_sum = _mm_add_ps(_mm_mul_ps(_w0, _val0), _sum);
_sum = _mm_add_ps(_mm_mul_ps(_w1, _val1), _sum);
_sum = _mm_add_ps(_mm_mul_ps(_w2, _val2), _sum);
_sum = _mm_add_ps(_mm_mul_ps(_w3, _val3), _sum);
#endif
tmpptr += 4;
kptr0 += 16;
}
_mm_store_ps(outptr0, _sum);
outptr0 += 4;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
// 1x1 convolution with stride 2 on pack-4 (4 floats per pixel) data, SSE path.
// Since the kernel is 1x1, stride-2 sampling is separable from the convolution:
// first gather every other pixel of every other row into a shrunken blob, then
// reuse the stride-1 packed sgemm kernel on the shrunken input.
static void conv1x1s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// Floats skipped at the end of each output row: the unread remainder of the
// current input row (w - 2*outw) plus one whole skipped row (w), times 4
// floats per pack-4 pixel.
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// Copy one pack-4 pixel, then skip the next one (stride 2 in x).
__m128 _v = _mm_load_ps(r0);
_mm_store_ps(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep; // stride 2 in y: jump past the rest of this row and the skipped row
}
}
// The shrunken input is now effectively stride 1; run the sgemm path on it.
conv1x1s1_sgemm_pack4_sse(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
thread_scale_tlp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <assert.h>
#include "zmtest_abslock.h"
#define TEST_NITER 100000
char cache_lines[640] = {0};
int indices [] = {3,6,1,7,0,2,9,4,8,5};
/* Lock-throughput scaling test.
 *
 * For every thread count from 1 to omp_get_max_threads(), each thread runs
 * TEST_NITER acquire/compute/release cycles on one shared absolute lock.
 * Even-ranked threads acquire with zm_abslock_acquire() and odd-ranked
 * threads with zm_abslock_acquire_l() -- presumably the high- and
 * low-priority acquisition paths; confirm against zmtest_abslock.h.
 * Per-group throughput (acquisitions/second) is printed for each count.
 *
 * Fixes vs. previous version: removed the unused local `err`, and made
 * `nthreads` an int to avoid a signed/unsigned loop comparison.
 */
static void test_thruput()
{
    int nthreads = omp_get_max_threads();
    zm_abslock_t lock;
    zm_abslock_init(&lock);
    int cur_nthreads = 0;
    printf("#Thread \t HP:Thruput[acqs/s] \t LP Thruput[acqs/s]\n");
    for(cur_nthreads=1; cur_nthreads <= nthreads; cur_nthreads++) {
        /* Per-thread wall-clock timestamps (VLAs sized by current count). */
        double start_times[cur_nthreads];
        double stop_times[cur_nthreads];
        #pragma omp parallel num_threads(cur_nthreads)
        {
            int tid = omp_get_thread_num();
            int iter;
            start_times[tid] = omp_get_wtime();
            for(iter=0; iter<TEST_NITER; iter++){
                if(tid % 2 == 0)
                    zm_abslock_acquire(&lock);
                else
                    zm_abslock_acquire_l(&lock);
                /* Critical section: touch scattered cache lines so the
                 * protected work cannot be optimized away. */
                for(int i = 0; i < 10; i++)
                    cache_lines[indices[i]] += cache_lines[indices[9-i]];
                zm_abslock_release(&lock);
            }
            stop_times[tid] = omp_get_wtime();
        } /* End of omp parallel*/
        /* Accumulate elapsed time separately for even (HP) and odd (LP) ranks. */
        double htimes = 0.0, ltimes = 0.0;
        int i;
        for(i=0; i < cur_nthreads; i++) {
            if (i % 2 == 0) htimes += (stop_times[i] - start_times[i]);
            else ltimes += (stop_times[i] - start_times[i]);
        }
        /* Even ranks in [0, cur_nthreads) number ceil(cur_nthreads/2). */
        int hthreads = (cur_nthreads % 2 == 0) ? cur_nthreads / 2 : (cur_nthreads + 1) / 2;
        int lthreads = (cur_nthreads % 2 == 0) ? hthreads : hthreads - 1;
        assert(hthreads + lthreads == cur_nthreads);
        if(lthreads > 0)
            printf("%d \t %lf \t %lf\n", cur_nthreads, ((double)TEST_NITER*hthreads)/htimes, ((double)TEST_NITER*lthreads)/ltimes);
        else
            printf("%d \t %f \t %f\n", cur_nthreads, ((double)TEST_NITER*hthreads)/htimes, -1.0);
    }
} /* end test_locked_counter() */
/* Entry point: command-line arguments are accepted but not used. */
int main(int argc, char **argv)
{
    (void) argc;
    (void) argv;
    test_thruput();
    return 0;
} /* end main() */
|
15_omp_task.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s --check-prefix=CHECK-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
extern void MPI_call(void*);
// Regression scenario for TypeArt stack instrumentation: the address of a
// local escapes into an OpenMP task, so the alloca must still be tracked via
// __typeart_alloc_stack (the check-inst FileCheck patterns below pin the
// expected IR; the code itself must not change or the patterns break).
void foo() {
// check-inst: define {{.*}} @foo
// check-inst: %x = alloca
// check-inst: %0 = bitcast i32* %x to i8*
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
// check-inst-not: __typeart_alloc_stack_omp
int x; // address of x escapes into the task below
#pragma omp parallel
{
#pragma omp task
{ MPI_call(&x); }
}
}
// FIXME one alloca is of the anon struct detected as OMP task struct related (need refinement of condition?)
// The Pattern: a = alloca struct; b = task_alloc; mem_cpy a to b;
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 2
// CHECK-NEXT: Global : 0
// CHECK-opt: TypeArtPass [Heap & Stack]
// CHECK-opt-NEXT: Malloc : 0
// CHECK-opt-NEXT: Free : 0
// CHECK-opt-NEXT: Alloca : 1
// CHECK-opt-NEXT: Global : 0 |
prand.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Source/prand: parallel random number generator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// A simple thread-safe parallel pseudo-random number generator.
#include "GraphBLAS.h"
#undef GB_PUBLIC
#define GB_LIBRARY
#include "graphblas_demos.h"
//------------------------------------------------------------------------------
// prand macros
//------------------------------------------------------------------------------
// Generate the next seed, and extract a random 15-bit value from a seed.
#define PRAND_RECURENCE(seed) ((seed) * 1103515245 + 12345)
#define PRAND_15_MAX 32767
#define PRAND_15(seed) (((seed)/65536) % (PRAND_15_MAX + 1))
//------------------------------------------------------------------------------
// global types and operators
//------------------------------------------------------------------------------
// These can be shared by all threads in a user application, and thus are
// safely declared as global objects.
GrB_Type prand_type = NULL ;
GrB_UnaryOp prand_next_op = NULL ;
GrB_UnaryOp prand_iget_op = NULL ;
GrB_UnaryOp prand_xget_op = NULL ;
GrB_BinaryOp prand_dup_op = NULL ;
//------------------------------------------------------------------------------
// prand_next_op: unary operator to construct the next seed
//------------------------------------------------------------------------------
// z = f(x), where x is the old seed and z is the new seed.
GB_PUBLIC
// z = f(x): advance each of the five independent LCG seed streams one step.
void prand_next_f (prand_t *z, const prand_t *x)
{
    int k = 0 ;
    while (k < 5)
    {
        z->seed [k] = PRAND_RECURENCE (x->seed [k]) ;
        k++ ;
    }
}
//------------------------------------------------------------------------------
// prand_iget: unary operator to get a random integer from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is an unsigned 64-bit
// pseudo-random number constructed from the seed.
GB_PUBLIC
// z = f(x): fold five 15-bit draws from the seed streams, Horner-style, into
// a single unsigned 64-bit pseudo-random value.
void prand_iget_f (uint64_t *z, const prand_t *x)
{
    uint64_t r = 0 ;
    int k ;
    for (k = 0 ; k < 5 ; k++)
    {
        r = r * PRAND_15_MAX + PRAND_15 (x->seed [k]) ;
    }
    (*z) = r ;
}
//------------------------------------------------------------------------------
// prand_xget: unary operator to get a random double from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is a double precision
// pseudo-random number constructed from the seed, in the range 0 to 1.
GB_PUBLIC
// z = f(x): draw a 64-bit integer from the seed and scale it into [0,1].
void prand_xget_f (double *z, prand_t *x)
{
    uint64_t u ;
    prand_iget_f (&u, x) ;
    (*z) = ((double) u) / ((double) UINT64_MAX) ;
}
//------------------------------------------------------------------------------
// prand_dup: binary operator to build a vector
//------------------------------------------------------------------------------
// This is required by GrB_Vector_build, but is never called since no
// duplicates are created. This is the SECOND operator for the prand_type.
#if defined ( __INTEL_COMPILER )
// disable icc warnings
// 869: unused parameters
#pragma warning (disable: 869 )
#elif defined ( __GNUC__ )
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
GB_PUBLIC
// z = y.  Duplicate-resolution operator required by GrB_Vector_build_UDT;
// prand_seed builds with unique indices, so this is never actually invoked.
void prand_dup_f (prand_t *z, /* unused: */ const prand_t *x, const prand_t *y)
{
(*z) = (*y) ;
}
//------------------------------------------------------------------------------
// prand_init: create the random seed type and its operators
//------------------------------------------------------------------------------
#define PRAND_FREE_ALL \
{ \
GrB_Type_free (&prand_type) ; \
GrB_UnaryOp_free (&prand_next_op) ; \
GrB_UnaryOp_free (&prand_iget_op) ; \
GrB_UnaryOp_free (&prand_xget_op) ; \
GrB_BinaryOp_free (&prand_dup_op) ; \
}
#undef OK
#define OK(method) \
{ \
GrB_Info info = method ; \
if (info != GrB_SUCCESS) \
{ \
PRAND_FREE_ALL ; \
printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ; \
return (info) ; \
} \
}
GB_PUBLIC
// Create the prand_t user-defined type and its four operators.  On any
// failure the OK macro frees everything created so far (PRAND_FREE_ALL)
// and returns the GrB_Info error code.
GrB_Info prand_init ( )
{
// start from a clean slate so PRAND_FREE_ALL is safe on partial failure
prand_type = NULL ;
prand_next_op = NULL ;
prand_iget_op = NULL ;
prand_xget_op = NULL ;
prand_dup_op = NULL ;
OK (GrB_Type_new (&prand_type, sizeof (prand_t))) ;
// z = next seed (prand_t -> prand_t)
OK (GrB_UnaryOp_new (&prand_next_op, (GxB_unary_function) prand_next_f,
prand_type, prand_type)) ;
// z = random uint64 extracted from a seed
OK (GrB_UnaryOp_new (&prand_iget_op, (GxB_unary_function) prand_iget_f,
GrB_UINT64, prand_type)) ;
// z = random double in [0,1] extracted from a seed
OK (GrB_UnaryOp_new (&prand_xget_op, (GxB_unary_function) prand_xget_f,
GrB_FP64, prand_type)) ;
// duplicate handler for GrB_Vector_build_UDT (never called in practice)
OK (GrB_BinaryOp_new (&prand_dup_op, (GxB_binary_function) prand_dup_f,
prand_type, prand_type, prand_type)) ;
return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_finalize: free the random seed type and its operators
//------------------------------------------------------------------------------
GB_PUBLIC
// Free the prand type and all four operators (safe to call more than once,
// since GrB_*_free sets each handle to NULL).
GrB_Info prand_finalize ( )
{
PRAND_FREE_ALL ;
return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_next: get the next random numbers
//------------------------------------------------------------------------------
GB_PUBLIC
// Advance every seed in the vector in place: Seed = prand_next_op (Seed).
GrB_Info prand_next
(
GrB_Vector Seed
)
{
return (GrB_Vector_apply (Seed, NULL, NULL, prand_next_op, Seed, NULL)) ;
}
//------------------------------------------------------------------------------
// prand_seed: create a vector of random seeds
//------------------------------------------------------------------------------
// Returns a vector of random seed values.
#define PRAND_FREE_WORK \
{ \
free (I) ; \
free (X) ; \
}
#undef PRAND_FREE_ALL
#define PRAND_FREE_ALL \
{ \
PRAND_FREE_WORK ; \
GrB_Vector_free (Seed) ; \
}
GB_PUBLIC
// Create a length-n vector of random seeds derived from a scalar seed.
// On error, all workspace and the partially built Seed vector are freed
// (via the OK / PRAND_FREE_ALL macros) and the error code is returned.
GrB_Info prand_seed
(
GrB_Vector *Seed, // vector of random number seeds
int64_t seed, // scalar input seed
GrB_Index n, // size of Seed to create
int nthreads // # of threads to use (OpenMP default if <= 0)
)
{
GrB_Index *I = NULL ;
prand_t *X = NULL ;
// allocate the Seed vector
OK (GrB_Vector_new (Seed, prand_type, n)) ;
// allocate the I and X arrays (n+1 so the allocation is nonzero when n == 0)
// NOTE(review): (n+1)*sizeof could overflow size_t for enormous n -- confirm
// callers bound n, or check against SIZE_MAX before multiplying.
I = (GrB_Index *) malloc ((n+1) * sizeof (GrB_Index)) ;
X = (prand_t *) malloc ((n+1) * sizeof (prand_t)) ;
if (I == NULL || X == NULL)
{
PRAND_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// determine # of threads to use
int nthreads_max = 1 ;
#ifdef _OPENMP
nthreads_max = omp_get_max_threads ( ) ;
#endif
if (nthreads <= 0 || nthreads > nthreads_max)
{
nthreads = nthreads_max ;
}
// construct the tuples for the initial seeds; each entry gets 5 distinct
// starting values derived from the scalar seed and its position
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (i = 0 ; i < len ; i++)
{
I [i] = i ;
for (int k = 0 ; k < 5 ; k++)
{
X [i].seed [k] = (100000000*(seed) + 10*i + k + 1) ;
}
}
// build the Seed vector (indices are unique, so prand_dup_op is never used)
OK (GrB_Vector_build_UDT (*Seed, I, X, n, prand_dup_op)) ;
// free workspace
PRAND_FREE_WORK ;
// advance to the first set of random numbers
OK (prand_next (*Seed)) ;
return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_print: print the Seed vector
//------------------------------------------------------------------------------
// This is meant for testing, not production use.
#undef PRAND_FREE_ALL
#define PRAND_FREE_ALL ;
GB_PUBLIC
// Print the Seed vector (testing/debugging only, not production use).
GrB_Info prand_print
(
GrB_Vector Seed,
int pr // 0: print nothing, 1: print some, 2: print all
)
{
if (pr > 0)
{
GrB_Index n ;
OK (GrB_Vector_nvals (&n, Seed)) ;
printf ("\nSeed: length %g\n", (double) n) ;
prand_t x ;
// sentinel values, shown if an entry is missing from the vector
for (int k = 0 ; k < 5 ; k++) x.seed [k] = -1 ;
for (int64_t i = 0 ; i < (int64_t) n ; i++)
{
if (GrB_Vector_extractElement_UDT (&x, Seed, i) == GrB_SUCCESS)
{
printf ("%g: ", (double) i) ;
for (int k = 0 ; k < 5 ; k++)
{
printf (" %.18g", (double) (x.seed [k])) ;
}
printf ("\n") ;
}
// pr == 1: truncate output after the first dozen entries
if (pr == 1 && i > 10) break ;
}
}
return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_iget: return a vector of random uint64 integers
//------------------------------------------------------------------------------
GB_PUBLIC
// X = random uint64 values drawn from Seed; Seed is then advanced so the
// next call produces fresh numbers.
GrB_Info prand_iget
(
GrB_Vector X,
GrB_Vector Seed
)
{
OK (GrB_Vector_apply (X, NULL, NULL, prand_iget_op, Seed, NULL)) ;
return (prand_next (Seed)) ;
}
//------------------------------------------------------------------------------
// prand_xget: return a vector of random doubles, in range 0 to 1 inclusive
//------------------------------------------------------------------------------
GB_PUBLIC
// X = random doubles in [0,1] drawn from Seed; Seed is then advanced so the
// next call produces fresh numbers.
GrB_Info prand_xget
(
GrB_Vector X,
GrB_Vector Seed
)
{
OK (GrB_Vector_apply (X, NULL, NULL, prand_xget_op, Seed, NULL)) ;
return (prand_next (Seed)) ;
}
|
claset.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlaset.c, normal z -> c, Fri Sep 28 17:38:08 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/******************************************************************************/
// Set the off-diagonal elements of an m-by-n matrix A to alpha and the
// diagonal to beta, on the part selected by uplo (LAPACK-layout entry point;
// tiles internally and runs the async tile variant).
// Returns PlasmaSuccess or a negative value indicating the offending argument.
int plasma_claset(plasma_enum_t uplo,
int m, int n,
plasma_complex32_t alpha, plasma_complex32_t beta,
plasma_complex32_t *pA, int lda)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaGeneral) &&
(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if (m < 0) {
plasma_error("illegal value of m");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (lda < imax(1, m)) {
plasma_error("illegal value of lda");
return -5;
}
// quick return
if (imin(n, m) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_laset(plasma, PlasmaComplexFloat, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
int retval;
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
m, n, 0, 0, m, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_general_desc_create() failed");
return retval;
}
// Initialize sequence.
// NOTE(review): retval of plasma_sequence_init is not checked -- confirm it
// cannot fail, or propagate the error as done for desc creation above.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
// Call tile async function.
plasma_omp_claset(uplo, alpha, beta, A, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
// Return status.
int status = sequence.status;
return status;
}
/******************************************************************************/
// Asynchronous tile variant of claset: set off-diagonal elements of A to
// alpha and the diagonal to beta, on the part selected by uplo.  Errors are
// reported through the sequence/request pair; the function returns void.
//
// Fix: sequence and request are validated FIRST and never passed to
// plasma_request_fail() when NULL.  Previously a NULL sequence (or request)
// was itself handed to plasma_request_fail(), which dereferences both
// pointers to record the failure status -- a NULL-pointer dereference.
void plasma_omp_claset(plasma_enum_t uplo,
                       plasma_complex32_t alpha, plasma_complex32_t beta,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // The error-reporting channel must exist before we can use it.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (imin(A.m, A.n) == 0)
        return;
    // Call the parallel function.
    plasma_pclaset(uplo, alpha, beta, A, sequence, request);
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/shear.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5
#define RightShiftOperator 0xf6
#define LessThanEqualOperator 0xf7
#define GreaterThanEqualOperator 0xf8
#define EqualOperator 0xf9
#define NotEqualOperator 0xfa
#define LogicalAndOperator 0xfb
#define LogicalOrOperator 0xfc
struct _FxInfo
{
const Image
*images;
MagickBooleanType
matte;
char
*expression;
SplayTreeInfo
*colors,
*symbols;
ResampleFilter
**resample_filter;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *image,const char *expression)
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: the expression.
%
*/
// Allocate and initialize an FxInfo for evaluating `expression` against
// `image`: clones the expression, strips whitespace, rewrites scientific
// notation into power expressions, and folds two-character operators into
// single-byte opcodes so the parser can treat every operator as one char.
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
char
fx_op[2];
FxInfo
*fx_info;
register long
i;
fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
if (fx_info == (FxInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=image;
fx_info->matte=image->matte;
// splay trees own both keys and values (freed with the tree)
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
// one resample filter per image in the list
fx_info->resample_filter=(ResampleFilter **) AcquireQuantumMemory(
GetImageListLength(fx_info->images),sizeof(*fx_info->resample_filter));
if (fx_info->resample_filter == (ResampleFilter **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i < (long) GetImageListLength(fx_info->images); i++)
fx_info->resample_filter[i]=AcquireResampleFilter(GetImageFromList(
fx_info->images,i),fx_info->exception);
fx_info->expression=ConstantString(expression);
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
if ((strstr(fx_info->expression,"e+") != (char *) NULL) ||
(strstr(fx_info->expression,"e-") != (char *) NULL))
{
/*
Convert scientific notation (e.g. "3e+4" -> "3*10^4") so the
expression parser only has to handle ordinary arithmetic.
*/
(void) SubstituteString(&fx_info->expression,"0e+","0*10^");
(void) SubstituteString(&fx_info->expression,"1e+","1*10^");
(void) SubstituteString(&fx_info->expression,"2e+","2*10^");
(void) SubstituteString(&fx_info->expression,"3e+","3*10^");
(void) SubstituteString(&fx_info->expression,"4e+","4*10^");
(void) SubstituteString(&fx_info->expression,"5e+","5*10^");
(void) SubstituteString(&fx_info->expression,"6e+","6*10^");
(void) SubstituteString(&fx_info->expression,"7e+","7*10^");
(void) SubstituteString(&fx_info->expression,"8e+","8*10^");
(void) SubstituteString(&fx_info->expression,"9e+","9*10^");
(void) SubstituteString(&fx_info->expression,"0e-","0*10^-");
(void) SubstituteString(&fx_info->expression,"1e-","1*10^-");
(void) SubstituteString(&fx_info->expression,"2e-","2*10^-");
(void) SubstituteString(&fx_info->expression,"3e-","3*10^-");
(void) SubstituteString(&fx_info->expression,"4e-","4*10^-");
(void) SubstituteString(&fx_info->expression,"5e-","5*10^-");
(void) SubstituteString(&fx_info->expression,"6e-","6*10^-");
(void) SubstituteString(&fx_info->expression,"7e-","7*10^-");
(void) SubstituteString(&fx_info->expression,"8e-","8*10^-");
(void) SubstituteString(&fx_info->expression,"9e-","9*10^-");
}
/*
Convert complex to simple operators: each two-character operator is
replaced by a single byte in the 0xf5-0xfc range (see the #defines).
*/
fx_op[1]='\0';
*fx_op=(char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",fx_op);
*fx_op=(char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",fx_op);
*fx_op=(char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",fx_op);
*fx_op=(char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",fx_op);
*fx_op=(char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",fx_op);
*fx_op=(char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",fx_op);
*fx_op=(char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",fx_op);
*fx_op=(char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",fx_op);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
*/
// Produce a noisy version of one quantum sample.  `attenuate` scales the
// noise strength (it is folded into every Sigma*/Tau*/NoiseEpsilon macro
// below, so the macros must be expanded after attenuate is in scope).
static Quantum GenerateNoise(const Quantum pixel,const NoiseType noise_type,
const MagickRealType attenuate)
{
#define NoiseEpsilon (attenuate*1.0e-5)
#define SigmaUniform ScaleCharToQuantum((unsigned char) (attenuate*4.0+0.5))
#define SigmaGaussian ScaleCharToQuantum((unsigned char) (attenuate*4.0+0.5))
#define SigmaImpulse (attenuate*0.10)
#define SigmaLaplacian ScaleCharToQuantum((unsigned char) (attenuate*10.0+0.5))
#define SigmaMultiplicativeGaussian \
ScaleCharToQuantum((unsigned char) (attenuate*1.0+0.5))
#define SigmaPoisson (attenuate*0.05)
#define TauGaussian ScaleCharToQuantum((unsigned char) (attenuate*20.0+0.5))
MagickRealType
alpha,
beta,
noise,
sigma;
// alpha is a uniform draw in [0,1); several branches take log(alpha),
// so zero is bumped to 1.0 (log(1) == 0, i.e. no noise contribution)
alpha=GetPseudoRandomValue();
if (alpha == 0.0)
alpha=1.0;
switch (noise_type)
{
case UniformNoise:
default:
{
// additive uniform noise centered on the pixel value
noise=(MagickRealType) pixel+SigmaUniform*(alpha-0.5);
break;
}
case GaussianNoise:
{
MagickRealType
tau;
// Box-Muller transform: two uniform draws -> two Gaussian deviates
beta=GetPseudoRandomValue();
sigma=sqrt(-2.0*log((double) alpha))*cos((double) (2.0*MagickPI*beta));
tau=sqrt(-2.0*log((double) alpha))*sin((double) (2.0*MagickPI*beta));
noise=(MagickRealType) pixel+sqrt((double) pixel)*SigmaGaussian*sigma+
TauGaussian*tau;
break;
}
case MultiplicativeGaussianNoise:
{
// noise amplitude scales with the pixel value itself
if (alpha <= NoiseEpsilon)
sigma=(MagickRealType) QuantumRange;
else
sigma=sqrt(-2.0*log((double) alpha));
beta=GetPseudoRandomValue();
noise=(MagickRealType) pixel+pixel*SigmaMultiplicativeGaussian*sigma/2.0*
cos((double) (2.0*MagickPI*beta));
break;
}
case ImpulseNoise:
{
// salt-and-pepper: force black or white with probability SigmaImpulse
if (alpha < (SigmaImpulse/2.0))
noise=0.0;
else
if (alpha >= (1.0-(SigmaImpulse/2.0)))
noise=(MagickRealType) QuantumRange;
else
noise=(MagickRealType) pixel;
break;
}
case LaplacianNoise:
{
// two-sided exponential: negative tail for alpha <= 0.5, positive above
if (alpha <= 0.5)
{
if (alpha <= NoiseEpsilon)
noise=(MagickRealType) pixel-(MagickRealType) QuantumRange;
else
noise=(MagickRealType) pixel+ScaleCharToQuantum((unsigned char)
(SigmaLaplacian*log((double) (2.0*alpha))+0.5));
break;
}
beta=1.0-alpha;
if (beta <= (0.5*NoiseEpsilon))
noise=(MagickRealType) (pixel+QuantumRange);
else
noise=(MagickRealType) pixel-ScaleCharToQuantum((unsigned char)
(SigmaLaplacian*log((double) (2.0*beta))+0.5));
break;
}
case PoissonNoise:
{
MagickRealType
poisson;
register long
i;
// Knuth's algorithm: count multiplications of uniforms until the
// product drops below exp(-lambda)
poisson=exp(-SigmaPoisson*(double) ScaleQuantumToChar(pixel));
for (i=0; alpha > poisson; i++)
{
beta=GetPseudoRandomValue();
alpha=alpha*beta;
}
noise=(MagickRealType) ScaleCharToQuantum((unsigned char)
(i/SigmaPoisson));
break;
}
case RandomNoise:
{
// replace the pixel entirely with a uniform random quantum
noise=(MagickRealType) QuantumRange*GetPseudoRandomValue();
break;
}
}
// clamp/round into the valid quantum range
return(RoundToQuantum(noise));
}
// Convenience wrapper: add noise of the given type to the default channel
// set of the image.  Returns a new image, or NULL on failure.
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  return(AddNoiseImageChannel(image,DefaultChannels,noise_type,exception));
}
// Add noise of the given type to the selected channels of the image.
// Works row by row through cache views; noise strength can be tuned via the
// image's "attenuate" property.  Returns a new image, or NULL on failure.
MagickExport Image *AddNoiseImageChannel(const Image *image,
const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
const char
*option;
Image
*noise_image;
long
progress,
y;
MagickBooleanType
status;
MagickRealType
attenuate;
ViewInfo
*image_view,
*noise_view;
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
// DirectClass is required so per-pixel values can be written freely
if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
{
InheritException(exception,&noise_image->exception);
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
// optional "attenuate" image property scales the noise amplitude
attenuate=1.0;
option=GetImageProperty(image,"attenuate");
if (option != (char *) NULL)
attenuate=atof(option);
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
noise_view=AcquireCacheView(noise_image);
for (y=0; y < (long) image->rows; y++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register IndexPacket
*noise_indexes;
register long
x;
register PixelPacket
*q;
// read source row (virtual) and destination row (authentic)
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
for (x=0; x < (long) image->columns; x++)
{
// apply noise only to the channels selected by the channel mask
if ((channel & RedChannel) != 0)
q->red=GenerateNoise(p->red,noise_type,attenuate);
if ((channel & GreenChannel) != 0)
q->green=GenerateNoise(p->green,noise_type,attenuate);
if ((channel & BlueChannel) != 0)
q->blue=GenerateNoise(p->blue,noise_type,attenuate);
if ((channel & OpacityChannel) != 0)
q->opacity=GenerateNoise(p->opacity,noise_type,attenuate);
// the index channel (black in CMYK) lives in a separate queue
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
noise_indexes[x]=(IndexPacket) GenerateNoise(indexes[x],noise_type,
attenuate);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(noise_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
if (status == MagickFalse)
break;
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
// on any row failure, discard the partial result and return NULL
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
// Create a charcoal-drawing effect: grayscale -> edge detect -> blur ->
// normalize -> negate -> grayscale.  Each intermediate image is destroyed
// as soon as the next stage has consumed it.  Returns NULL on failure.
MagickExport Image *CharcoalImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*charcoal_image,
*clone_image,
*edge_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageType(clone_image,GrayscaleType);
edge_image=EdgeImage(clone_image,radius,exception);
clone_image=DestroyImage(clone_image);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
charcoal_image=BlurImage(edge_image,radius,sigma,exception);
edge_image=DestroyImage(edge_image);
if (charcoal_image == (Image *) NULL)
return((Image *) NULL);
(void) NormalizeImage(charcoal_image);
(void) NegateImage(charcoal_image,MagickFalse);
(void) SetImageType(charcoal_image,GrayscaleType);
return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;  /* per-channel blend percentages parsed from `opacity' */

  MagickStatusType
    flags;

  ViewInfo
    *colorize_view,
    *image_view;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* No blend percentage supplied: return the untouched clone. */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  rho is the blend percentage for
    all channels; sigma/xi/psi override green/blue/opacity respectively
    (e.g. "90/100/10" is 90% red, 100% green, 10% blue).
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.  Rows are processed in parallel; `p' reads
    the source row, `q' writes the destination row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  colorize_view=AcquireCacheView(colorize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *p;

    register long
      x;

    register PixelPacket
      *q;

    /* A prior row failed; skip work (cannot break out of an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) image->columns; x++)
    {
      /* Linear blend: (source*(100-pct)+fill*pct)/100 for each channel. */
      q->red=(Quantum) ((p->red*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0);
      q->green=(Quantum) ((p->green*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0);
      q->blue=(Quantum) ((p->blue*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0);
      q->opacity=(Quantum) ((p->opacity*(100.0-pixel.opacity)+
        colorize.opacity*pixel.opacity)/100.0);
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const unsigned long order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const unsigned long order,const double *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,const unsigned long order,
  const double *kernel,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: convolve the default set of channels.
  */
  return(ConvolveImageChannel(image,DefaultChannels,order,kernel,exception));
}
MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const unsigned long order,const double *kernel,
  ExceptionInfo *exception)
{
#define ConvolveImageTag "Convolve/Image"

  double
    *normal_kernel;  /* kernel scaled so its coefficients sum to 1 */

  Image
    *convolve_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    bias,
    gamma;

  register long
    i;

  unsigned long
    width;

  ViewInfo
    *convolve_view,
    *image_view;

  /*
    Initialize convolve image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=order;
  /* The kernel must have a center pixel, so its width must be odd. */
  if ((width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  convolve_image=CloneImage(image,0,0,MagickTrue,exception);
  if (convolve_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(convolve_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&convolve_image->exception);
      convolve_image=DestroyImage(convolve_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the kernel, one formatted row per line. */
      char
        format[MaxTextExtent],
        *message;

      long
        u,
        v;

      register const double
        *k;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ConvolveImage with %ldx%ld kernel:",width,width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (long) width; v++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%ld: ",v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (long) width; u++)
        {
          (void) FormatMagickString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Normalize kernel.
  */
  normal_kernel=(double *) AcquireQuantumMemory(width*width,
    sizeof(*normal_kernel));
  if (normal_kernel == (double *) NULL)
    {
      convolve_image=DestroyImage(convolve_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  gamma=0.0;
  for (i=0; i < (long) (width*width); i++)
    gamma+=kernel[i];
  /* Avoid dividing by a (near) zero sum, e.g. for edge-detect kernels. */
  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
  for (i=0; i < (long) (width*width); i++)
    normal_kernel[i]=gamma*kernel[i];
  /*
    Convolve image.  Each output row reads a (columns+width) x width window
    of virtual source pixels centered on the row.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  bias=image->bias;
  image_view=AcquireCacheView(image);
  convolve_view=AcquireCacheView(convolve_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *convolve_indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* A prior row failed; skip work (cannot break out of an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((long) width/2L),y-(long) (width/
      2L),image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(convolve_view,0,y,convolve_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    convolve_indexes=GetCacheViewAuthenticIndexQueue(convolve_view);
    for (x=0; x < (long) image->columns; x++)
    {
      long
        j,  /* offset of the current kernel row within the source window */
        v;

      MagickPixelPacket
        pixel;

      register const double
        *k;

      register long
        u;

      pixel=zero;
      k=normal_kernel;
      j=0;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Fast path: no alpha weighting; accumulate k-weighted sums per
            channel directly.
          */
          for (v=0; v < (long) width; v++)
          {
            for (u=0; u < (long) width; u++)
            {
              pixel.red+=(*k)*(p+u+j)->red;
              pixel.green+=(*k)*(p+u+j)->green;
              pixel.blue+=(*k)*(p+u+j)->blue;
              k++;
            }
            j+=image->columns+width;  /* advance one window row */
          }
          if ((channel & RedChannel) != 0)
            q->red=RoundToQuantum(pixel.red+bias);
          if ((channel & GreenChannel) != 0)
            q->green=RoundToQuantum(pixel.green+bias);
          if ((channel & BlueChannel) != 0)
            q->blue=RoundToQuantum(pixel.blue+bias);
          if ((channel & OpacityChannel) != 0)
            {
              k=normal_kernel;
              j=0;
              for (v=0; v < (long) width; v++)
              {
                for (u=0; u < (long) width; u++)
                {
                  pixel.opacity+=(*k)*(p+u+j)->opacity;
                  k++;
                }
                j+=image->columns+width;
              }
              q->opacity=RoundToQuantum(pixel.opacity+bias);
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              /* Convolve the black (index) channel of a CMYK image. */
              k=normal_kernel;
              j=0;
              for (v=0; v < (long) width; v++)
              {
                for (u=0; u < (long) width; u++)
                {
                  pixel.index+=(*k)*indexes[x+u+j];
                  k++;
                }
                j+=image->columns+width;
              }
              convolve_indexes[x]=RoundToQuantum(pixel.index+bias);
            }
        }
      else
        {
          /*
            Alpha path: weight each color sample by its transparency so
            fully-transparent neighbors do not bleed color, then renormalize
            by the accumulated alpha (gamma).
          */
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (v=0; v < (long) width; v++)
          {
            for (u=0; u < (long) width; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                (p+u+j)->opacity));
              pixel.red+=(*k)*alpha*(p+u+j)->red;
              pixel.green+=(*k)*alpha*(p+u+j)->green;
              pixel.blue+=(*k)*alpha*(p+u+j)->blue;
              pixel.opacity+=(*k)*(p+u+j)->opacity;
              gamma+=alpha;
              k++;
            }
            j+=image->columns+width;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            q->red=RoundToQuantum(gamma*pixel.red+bias);
          if ((channel & GreenChannel) != 0)
            q->green=RoundToQuantum(gamma*pixel.green+bias);
          if ((channel & BlueChannel) != 0)
            q->blue=RoundToQuantum(gamma*pixel.blue+bias);
          if ((channel & OpacityChannel) != 0)
            {
              k=normal_kernel;
              j=0;
              for (v=0; v < (long) width; v++)
              {
                for (u=0; u < (long) width; u++)
                {
                  pixel.opacity+=(*k)*(p+u+j)->opacity;
                  k++;
                }
                j+=image->columns+width;
              }
              q->opacity=RoundToQuantum(pixel.opacity+bias);
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              k=normal_kernel;
              j=0;
              for (v=0; v < (long) width; v++)
              {
                for (u=0; u < (long) width; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    (p+u+j)->opacity));
                  pixel.index+=(*k)*alpha*indexes[x+u+j];
                  k++;
                }
                j+=image->columns+width;
              }
              convolve_indexes[x]=RoundToQuantum(gamma*pixel.index+bias);
            }
        }
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(convolve_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ConvolveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  convolve_image->type=image->type;
  convolve_view=DestroyCacheView(convolve_view);
  image_view=DestroyCacheView(image_view);
  normal_kernel=(double *) RelinquishMagickMemory(normal_kernel);
  if (status == MagickFalse)
    convolve_image=DestroyImage(convolve_image);
  return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  long
    count;

  register long
    n;

  /*
    Release every resource owned by the FxInfo: the exception, the
    expression string, both splay-tree caches, one resample filter per
    image in the list, the filter array itself, and finally the structure.
    Always returns NULL so callers can overwrite their pointer.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  count=(long) GetImageListLength(fx_info->images);
  for (n=0; n < count; n++)
    fx_info->resample_filter[n]=DestroyResampleFilter(
      fx_info->resample_filter[n]);
  fx_info->resample_filter=(ResampleFilter **) RelinquishMagickMemory(
    fx_info->resample_filter);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: the constant operand applied with the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  Apply a single evaluate operator with constant operand `value' to one
  quantum sample; the result is clamped to the valid quantum range by
  RoundToQuantum.  Bitwise operators round `value' to the nearest integer
  via +0.5 before operating.
*/
static inline Quantum ApplyEvaluateOperator(Quantum pixel,
  const MagickEvaluateOperator op,const MagickRealType value)
{
  MagickRealType
    result;

  result=0.0;
  switch(op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AndEvaluateOperator:
    {
      result=(MagickRealType) ((unsigned long) pixel & (unsigned long)
        (value+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* Guard against division by zero: treat a zero divisor as 1. */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType)GenerateNoise(pixel,GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType)GenerateNoise(pixel,ImpulseNoise,value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType)GenerateNoise(pixel,LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(MagickRealType) ((unsigned long) pixel << (unsigned long)
        (value+0.5));
      break;
    }
    case LogEvaluateOperator:
    {
      /* Scaled logarithm: maps [0,QuantumRange] through log(1+value*x). */
      result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
        pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) MagickMax((double) pixel,value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateNoise(pixel,MultiplicativeGaussianNoise,
        value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(MagickRealType) ((unsigned long) pixel | (unsigned long)
        (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateNoise(pixel,PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* Gamma-style power curve on the normalized sample. */
      result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
        (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(MagickRealType) ((unsigned long) pixel >> (unsigned long)
        (value+0.5));
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      /* Hard threshold: below value -> black, otherwise -> white. */
      result=(MagickRealType) (((MagickRealType) pixel < value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      /* Clamp only the dark side: below value -> black, else unchanged. */
      result=(MagickRealType) (((MagickRealType) pixel < value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      /* Clamp only the bright side: above value -> white, else unchanged. */
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateNoise(pixel,UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(MagickRealType) ((unsigned long) pixel ^ (unsigned long)
        (value+0.5));
      break;
    }
  }
  return(RoundToQuantum(result));
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the evaluate operator to every channel.
  */
  return(EvaluateImageChannel(image,AllChannels,op,value,exception));
}
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image "

  long
    progress,
    y;

  MagickBooleanType
    status;

  ViewInfo
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The operator mutates pixels in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* A prior row failed; skip work (cannot break out of an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ApplyEvaluateOperator(q->red,op,value);
      if ((channel & GreenChannel) != 0)
        q->green=ApplyEvaluateOperator(q->green,op,value);
      if ((channel & BlueChannel) != 0)
        q->blue=ApplyEvaluateOperator(q->blue,op,value);
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte == MagickFalse)
            q->opacity=ApplyEvaluateOperator(q->opacity,op,value);
          else
            /* With matte, operate on alpha (QuantumRange-opacity) and
               convert the result back to opacity. */
            q->opacity=(Quantum) QuantumRange-ApplyEvaluateOperator(
              (Quantum) (QuantumRange-q->opacity),op,value);
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        indexes[x]=(IndexPacket) ApplyEvaluateOperator(indexes[x],op,value);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% MagickRealType FxEvaluateChannelExpression(FxInfo *fx_info,
% const ChannelType channel,const long x,const long y,
% MagickRealType *alpha,Exceptioninfo *exception)
% MagickRealType FxEvaluateExpression(FxInfo *fx_info,
% MagickRealType *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent],
    statistic[MaxTextExtent];

  const char
    *value;

  register const char
    *p;

  /*
    Return a per-channel image statistic (depth, maxima, mean, minima, or
    standard_deviation) scaled by QuantumScale.  Results are memoized in
    fx_info->symbols keyed by image address, channel, and symbol, so each
    statistic is computed at most once per image/channel.
  */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    switch (*++p) /* e.g. depth.r selects the red channel */
    {
      case 'r': channel=RedChannel; break;
      case 'g': channel=GreenChannel; break;
      case 'b': channel=BlueChannel; break;
      case 'c': channel=CyanChannel; break;
      case 'm': channel=MagentaChannel; break;
      case 'y': channel=YellowChannel; break;
      case 'k': channel=BlackChannel; break;
      default: break;
    }
  (void) FormatMagickString(key,MaxTextExtent,"%p.%ld.%s",image,(long) channel,
    symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*atof(value));  /* cache hit */
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Fix: initialize the buffer so an unrecognized symbol yields "" (0.0)
    instead of reading uninitialized stack memory below.
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      unsigned long
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%lu",depth);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatMagickString(statistic,MaxTextExtent,"%g",
        standard_deviation);
    }
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*atof(statistic));
}
static MagickRealType
FxEvaluateSubexpression(FxInfo *,const ChannelType,const long,const long,
const char *,MagickRealType *,ExceptionInfo *);
static inline MagickRealType FxMax(FxInfo *fx_info,const ChannelType channel,
  const long x,const long y,const char *expression,ExceptionInfo *exception)
{
  MagickRealType
    first,
    second;

  /*
    Evaluate a two-operand max(): the subexpression yields the first
    operand directly and the second through the beta out-parameter.
  */
  first=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&second,
    exception);
  return((MagickRealType) MagickMax((double) first,(double) second));
}
/*
  Evaluate a two-operand min(): the subexpression yields the first operand
  directly and the second through the beta out-parameter.  The channel
  parameter is now const for consistency with the sibling FxMax(); as a
  by-value parameter of a static function this does not affect callers.
*/
static inline MagickRealType FxMin(FxInfo *fx_info,const ChannelType channel,
  const long x,const long y,const char *expression,ExceptionInfo *exception)
{
  MagickRealType
    alpha,
    beta;

  alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&beta,exception);
  return((MagickRealType) MagickMin((double) alpha,(double) beta));
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *cursor;

  register long
    depth;

  /*
    Scan forward to the ')' that closes the current parenthesized
    subexpression, tracking nesting depth.  If the string ends before the
    parentheses balance, raise an unbalanced-parenthesis error.
  */
  depth=0;
  for (cursor=expression; *cursor != '\0'; cursor++)
  {
    if ((depth == 1) && (*cursor == ')'))
      break;
    if (*cursor == '(')
      depth++;
    else
      if (*cursor == ')')
        depth--;
  }
  if (*cursor == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(cursor);
}
/*
  Resolve a symbol in an fx expression: an optional image selector
  (s/u/v, possibly with an [index] subexpression), an optional pixel
  reference p{x,y} (absolute) or p[dx,dy] (relative), an optional color
  name, and finally the symbol proper (channel letters, coordinates,
  statistics, image attributes).  Returns the value scaled to [0,1] where
  applicable, or 0.0 with an exception on parse failure.
*/
static MagickRealType FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const long x,const long y,const char *expression,ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent],
    symbol[MaxTextExtent];

  const char
    *p,
    *value;

  Image
    *image;

  MagickPixelPacket
    pixel;

  MagickRealType
    alpha,
    beta;

  PointInfo
    point;

  register long
    i;

  size_t
    length;

  unsigned long
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) *(p+1)) == 0)
    {
      /*
        Image selector: s is the current image, u the first, v the second;
        u[expr] selects by computed index.
      */
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* Copy the bracketed subexpression, honoring nested []. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              i=(long) (alpha+0.5);
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((isalpha((int) *(p+1)) == 0) && (*p == 'p'))
        {
          /*
            Pixel reference: p{x,y} is an absolute position, p[dx,dy] is
            relative to the current pixel.
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /* Wrap the image index into the valid list range. */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(long) length;
  i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  (void) ResamplePixelColor(fx_info->resample_filter[i],point.x,point.y,&pixel);
  if ((strlen(p) > 2) &&
      (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      /*
        The symbol may begin with a color name (e.g. "red.r"); look it up
        in the per-expression color cache before querying by name.
      */
      char
        name[MaxTextExtent];

      GetPathComponent(p,BasePath,name);
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;

          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* Bare pixel reference: return the current channel's value. */
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          if (pixel.matte == MagickFalse)
            {
              fx_info->matte=MagickFalse;
              return(1.0);
            }
          return((MagickRealType) (QuantumScale*(QuantumRange-pixel.opacity)));
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((MagickRealType) (QuantumScale*(QuantumRange-pixel.opacity)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
          return(0.0);
        }
      /* CMYK symbols map onto storage channels: c=red, m=green, y=blue. */
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((MagickRealType) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(RoundToQuantum(pixel.red),RoundToQuantum(pixel.green),
            RoundToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((MagickRealType) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((MagickRealType) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(RoundToQuantum(pixel.red),RoundToQuantum(pixel.green),
            RoundToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          /* Rec. 709 luma coefficients. */
          luminence=0.2126*pixel.red+0.7152*pixel.green+0.0722*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      /* Fix: magenta is stored in the green channel (was pixel.blue). */
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((MagickRealType) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((MagickRealType) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((MagickRealType) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((MagickRealType) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((MagickRealType) image->page.y);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(RoundToQuantum(pixel.red),RoundToQuantum(pixel.green),
            RoundToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((MagickRealType) fx_info->images->scene);
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((MagickRealType) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      /* Fix: yellow is stored in the blue channel (was pixel.green). */
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          MagickRealType
            depth;

          depth=(MagickRealType) GetImageChannelDepth(image,channel,
            fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /* Fall back to a user-defined symbol stored in the symbol table. */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return((MagickRealType) atof(value));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans 'expression' and returns a pointer to the
  operator at which the expression should be split for recursive evaluation:
  the loosest-binding operator found outside any brace/bracket grouping.
  NULL is returned when no splittable operator exists (a leaf term).
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Precedence levels, listed from tightest-binding to loosest-binding; the
    scan prefers to split at the largest value encountered.
  */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;
  FxPrecedence
    precedence,
    target;
  register const char
    *subexpression;
  register int
    c;  /* previous (non-skipped) character; 0 at start of scan */
  unsigned long
    level;  /* brace/bracket nesting depth; operators count only at level 0 */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    /* Skip whitespace; also skip everything after '@' (filename reference). */
    if ((isspace((int) ((char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over the name "atan2" wholesale so its trailing digit does not
      trigger the implicit-multiplication heuristic below against '('.
    */
    if (LocaleNCompare(expression,"atan2",5) == 0)
      {
        expression+=5;
        continue;
      }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        {
          precedence=ExponentPrecedence;
          break;
        }
        /*
          NOTE: 'default' appears mid-switch; case order is irrelevant in C.
          It detects implicit multiplication, e.g. "2x" or ")(": a digit or
          ')' followed by a lowercase letter, '(' or a digit (with special
          cases excluded for the coordinates "x"/"y").
        */
        default:
        {
          if (((c != 0) && ((isdigit((int) ((char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((char) *expression)) != 0) ||
               (strchr("(",(int) *expression) != (char *) NULL)) ||
               ((isdigit((int) ((char) c)) == 0) &&
                (isdigit((int) ((char) *expression)) != 0))) &&
              (strchr("xy",(int) *expression) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            '+'/'-' are binary only when the previous character is not an
            operator or '(' (otherwise they are unary signs).
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* Parenthesized subexpressions are skipped as a unit. */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
/*
  FxEvaluateSubexpression() recursively evaluates an fx expression for the
  given channel at pixel (x,y).  The expression is first split at its
  loosest-binding operator (FxOperatorPrecedence); otherwise it is treated
  as a parenthesized group, a unary operator, a named function/constant, a
  numeric literal, or a symbol (FxGetSymbol).  The right-hand operand value
  is also stored through 'beta' (used e.g. by atan2/hypot/pow).  Errors are
  reported through 'exception' and yield 0.0.
*/
static MagickRealType FxEvaluateSubexpression(FxInfo *fx_info,
  const ChannelType channel,const long x,const long y,const char *expression,
  MagickRealType *beta,ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent];
  MagickRealType
    alpha,
    gamma;
  register const char
    *p;
  *beta=0.0;
  if (exception->severity != UndefinedException)
    return(0.0);
  while (isspace((int) *expression) != 0)
    expression++;
  if (*expression == '\0')
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "MissingExpression","`%s'",expression);
      return(0.0);
    }
  /*
    Binary/ternary operators: evaluate the left-hand side, then dispatch on
    the operator found by FxOperatorPrecedence().
  */
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
        exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(MagickRealType) (~(unsigned long) *beta);
          return(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(*beta == 0.0 ? 1.0 : 0.0);
        }
        case '^':
        {
          *beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info,
            channel,x,y,++p,beta,exception));
          return(*beta);
        }
        case '*':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha*(*beta));
        }
        case '/':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          if (*beta == 0.0)
            {
              if (exception->severity == UndefinedException)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(alpha/(*beta));
        }
        case '%':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          /* Modulus operates on the rounded magnitude of the divisor. */
          *beta=fabs(floor(((double) *beta)+0.5));
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(fmod((double) alpha,(double) *beta));
        }
        case '+':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha+(*beta));
        }
        case '-':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha-(*beta));
        }
        case LeftShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(MagickRealType) ((unsigned long) (alpha+0.5) << (unsigned long)
            (gamma+0.5));
          return(*beta);
        }
        case RightShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(MagickRealType) ((unsigned long) (alpha+0.5) >> (unsigned long)
            (gamma+0.5));
          return(*beta);
        }
        case '<':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha < *beta ? 1.0 : 0.0);
        }
        case LessThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha <= *beta ? 1.0 : 0.0);
        }
        case '>':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha > *beta ? 1.0 : 0.0);
        }
        case GreaterThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha >= *beta ? 1.0 : 0.0);
        }
        case EqualOperator:
        {
          /* Equality is approximate for reals: within MagickEpsilon. */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(fabs(alpha-(*beta)) <= MagickEpsilon ? 1.0 : 0.0);
        }
        case NotEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(fabs(alpha-(*beta)) > MagickEpsilon ? 1.0 : 0.0);
        }
        case '&':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(MagickRealType) ((unsigned long) (alpha+0.5) & (unsigned long)
            (gamma+0.5));
          return(*beta);
        }
        case '|':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(MagickRealType) ((unsigned long) (alpha+0.5) | (unsigned long)
            (gamma+0.5));
          return(*beta);
        }
        case LogicalAndOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case LogicalOrOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          *beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case '?':
        {
          MagickRealType
            gamma;
          /* Ternary: split the tail at ':' and evaluate only one branch. */
          (void) CopyMagickString(subexpression,++p,MaxTextExtent);
          q=subexpression;
          p=StringToken(":",&q);
          if (q == (char *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          if (fabs((double) alpha) > MagickEpsilon)
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,exception);
          else
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,exception);
          return(gamma);
        }
        case '=':
        {
          char
            numeric[MaxTextExtent];
          /* Assignment target must be a purely alphabetic symbol name. */
          q=subexpression;
          while (isalpha((int) ((unsigned char) *q)) != 0)
            q++;
          if (*q != '\0')
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          /* The LHS lookup above likely failed; clear that exception. */
          ClearMagickException(exception);
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          (void) FormatMagickString(numeric,MaxTextExtent,"%g",(double) *beta);
          (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
          (void) AddValueToSplayTree(fx_info->symbols,ConstantString(
            subexpression),ConstantString(numeric));
          return(*beta);
        }
        case ',':
        {
          /* Comma evaluates both operands but yields the left one. */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(alpha);
        }
        case ';':
        {
          /* Statement separator yields the right operand. */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
          return(*beta);
        }
        default:
        {
          /* Implicit multiplication, e.g. "2u" or ")(...". */
          gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,
            exception);
          return(gamma);
        }
      }
    }
  /*
    Parenthesized group: strip the outer parentheses and recurse.
  */
  if (strchr("(",(int) *expression) != (char *) NULL)
    {
      (void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
      subexpression[strlen(subexpression)-1]='\0';
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
        exception);
      return(gamma);
    }
  /*
    Unary operators, named functions, constants, and symbols.
  */
  switch (*expression)
  {
    case '+':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return(1.0*gamma);
    }
    case '-':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return(-1.0*gamma);
    }
    case '~':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return((MagickRealType) (~(unsigned long) (gamma+0.5)));
    }
    case 'A':
    case 'a':
    {
      if (LocaleNCompare(expression,"abs",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) fabs((double) alpha));
        }
      if (LocaleNCompare(expression,"acos",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) acos((double) alpha));
        }
      if (LocaleNCompare(expression,"asin",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) asin((double) alpha));
        }
      if (LocaleNCompare(expression,"alt",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(((long) alpha) & 0x01 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"atan2",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) atan2((double) alpha,(double) *beta));
        }
      if (LocaleNCompare(expression,"atan",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) atan((double) alpha));
        }
      if (LocaleCompare(expression,"a") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(expression,"b") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(expression,"ceil",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) ceil((double) alpha));
        }
      if (LocaleNCompare(expression,"cosh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) cosh((double) alpha));
        }
      if (LocaleNCompare(expression,"cos",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) cos((double) alpha));
        }
      if (LocaleCompare(expression,"c") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(expression,"debug",5) == 0)
        {
          const char
            *type;
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          if (fx_info->images->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel: type="cyan"; break;
              case MagentaChannel: type="magenta"; break;
              case YellowChannel: type="yellow"; break;
              case OpacityChannel: type="opacity"; break;
              case BlackChannel: type="black"; break;
              default: type="unknown"; break;
            }
          else
            switch (channel)
            {
              case RedChannel: type="red"; break;
              case GreenChannel: type="green"; break;
              case BlueChannel: type="blue"; break;
              case OpacityChannel: type="opacity"; break;
              default: type="unknown"; break;
            }
          (void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
          if (strlen(subexpression) > 1)
            subexpression[strlen(subexpression)-1]='\0';
          (void) fprintf(stderr,"%s[%ld,%ld].%s: %s=%g\n",
            fx_info->images->filename,y,x,type,subexpression,(double) alpha);
          return(0.0);
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(expression,"epsilon") == 0)
        return((MagickRealType) MagickEpsilon);
      if (LocaleNCompare(expression,"exp",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) exp((double) alpha));
        }
      if (LocaleCompare(expression,"e") == 0)
        return((MagickRealType) 2.7182818284590452354);
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) floor((double) alpha));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) hypot((double) alpha,(double) *beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) floor(alpha+0.5));
        }
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return((MagickRealType) log((double) alpha));
        }
      /*
        FIX: "logtwo" is 6 characters; the previous code compared only 4
        (matching the bare prefix "logt") and then evaluated expression+4,
        which parsed the stray "wo(...)" as the operand.  Compare and skip
        the full 6-character name, and compute log2 via log10(a)/log10(2).
      */
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
            exception);
          return((MagickRealType) (log10((double) alpha)/log10(2.0)));
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) log10((double) alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return((MagickRealType) QuantumRange);
      /* "maxima"/"minima" are channel statistics symbols, not functions. */
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        return(FxMax(fx_info,channel,x,y,expression+3,exception));
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        return(FxMin(fx_info,channel,x,y,expression+3,exception));
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) fmod((double) alpha,(double) *beta));
        }
      if (LocaleCompare(expression,"m") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(expression,"n") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        return(1.0);
      if (LocaleCompare(expression,"o") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"pi") == 0)
        return((MagickRealType) MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) pow((double) alpha,(double) *beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        return((MagickRealType) QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        return((MagickRealType) QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        return((MagickRealType) GetPseudoRandomValue());
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          /* Round half away from zero. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          if (alpha >= 0.0)
            return((MagickRealType) floor((double) alpha+0.5));
          return((MagickRealType) ceil((double) alpha-0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) sinh((double) alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) sin((double) alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) sqrt((double) alpha));
        }
      if (LocaleCompare(expression,"s") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) tanh((double) alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) tan((double) alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        return(0.0);
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Fall through: numeric literal, else look the token up as a symbol.
  */
  q=(char *) expression;
  alpha=strtod(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}
/*
  FxEvaluateExpression() evaluates the fx expression for the gray channel
  at pixel (0,0), storing the result in 'alpha'.
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception));
}
/*
  FxEvaluateChannelExpression() evaluates the fx expression for the given
  channel at pixel (x,y); the result is stored in 'alpha'.  MagickFalse is
  returned when evaluation raised an option error.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const long x,const long y,MagickRealType *alpha,
  ExceptionInfo *exception)
{
  MagickRealType
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set, then the
  set itself.  Returns NULL (the relinquished pointer).
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register long
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (long) GetPixelCacheMaximumThreads(); i++)
  {
    if (fx_info[i] == (FxInfo *) NULL)
      continue;
    fx_info[i]=DestroyFxInfo(fx_info[i]);
  }
  return((FxInfo **) RelinquishMagickMemory(fx_info));
}
/*
  AcquireFxThreadSet() allocates one FxInfo per pixel-cache thread, each
  initialized with 'expression' (or, when the expression starts with '@',
  the contents of the named file) and primed by a first evaluation.
  Returns NULL on failure.

  Fixes: the expression string read via FileToString() is now checked for
  NULL before use, and it is no longer leaked when AcquireFxInfo() fails
  part-way through the loop.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;
  FxInfo
    **fx_info;
  MagickRealType
    alpha;
  register long
    i;
  unsigned long
    number_threads;
  number_threads=GetPixelCacheMaximumThreads();
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    return((FxInfo **) NULL);
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0,exception);
  if (fx_expression == (char *) NULL)
    return(DestroyFxThreadSet(fx_info));
  for (i=0; i < (long) number_threads; i++)
  {
    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      {
        fx_expression=DestroyString(fx_expression);
        return(DestroyFxThreadSet(fx_info));
      }
    /* Prime the per-thread evaluator (e.g. caches symbol statistics). */
    (void) FxEvaluateExpression(fx_info[i],&alpha,fx_info[i]->exception);
  }
  fx_expression=DestroyString(fx_expression);
  return(fx_info);
}
/*
  FxImage() applies a mathematical expression to the image; convenience
  wrapper that operates on the gray channel.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  return(FxImageChannel(image,GrayChannel,expression,exception));
}
/*
  FxImageChannel() applies a mathematical expression to the selected
  channels of the image and returns a new image with the results, or NULL
  on failure.  Rows are processed in parallel; each thread evaluates with
  its own FxInfo from the thread set.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"
  FxInfo
    **fx_info;
  Image
    *fx_image;
  long
    progress,
    y;
  MagickBooleanType
    status;
  MagickRealType
    alpha;
  ViewInfo
    *fx_view;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    {
      fx_image=DestroyImage(fx_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Trial evaluation: reject a malformed expression before the pixel loop. */
  status=FxEvaluateExpression(fx_info[0],&alpha,exception);
  if (status == MagickFalse)
    {
      fx_image=DestroyImage(fx_image);
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireCacheView(fx_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) fx_image->rows; y++)
  {
    IndexPacket
      *fx_indexes;
    register long
      id,
      x;
    register PixelPacket
      *q;
    /* A failed row in any thread cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    /* Each thread uses its own FxInfo, selected by thread id. */
    id=GetPixelCacheThreadId();
    for (x=0; x < (long) fx_image->columns; x++)
    {
      MagickRealType
        alpha;
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          q->red=RoundToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          q->green=RoundToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          q->blue=RoundToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* With a matte channel the result is interpreted as alpha, so
             invert it to get opacity. */
          if (image->matte == MagickFalse)
            q->opacity=RoundToQuantum((MagickRealType) QuantumRange*alpha);
          else
            q->opacity=RoundToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          fx_indexes[x]=(IndexPacket) RoundToQuantum((MagickRealType)
            QuantumRange*alpha);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_image->matte=fx_info[0]->matte;
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImplodeImage() returns a copy of the image with pixels inside the largest
  centered ellipse pulled toward (amount > 0) or pushed away from
  (amount < 0) the image center, resampling source pixels through a
  per-thread resample filter.  NULL is returned on failure.
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"
  Image
    *implode_image;
  long
    progress,
    y;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  MagickRealType
    radius;
  PointInfo
    center,
    scale;
  ResampleFilter
    **resample_filter;
  ViewInfo
    *image_view,
    *implode_view;
  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor.
  */
  /* Scale the shorter axis up so the effect region is a circle in the
     scaled coordinate space; radius is half the shorter dimension. */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  resample_filter=AcquireResampleFilterThreadSet(image,MagickTrue,exception);
  image_view=AcquireCacheView(image);
  implode_view=AcquireCacheView(implode_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    MagickRealType
      distance;
    PointInfo
      delta;
    register IndexPacket
      *implode_indexes;
    register long
      id,
      x;
    register PixelPacket
      *q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    /* Each thread resamples through its own filter, selected by id. */
    id=GetPixelCacheThreadId();
    for (x=0; x < (long) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;
          /*
            Implode the pixel.
          */
          /* factor scales the offset from center; it shrinks toward the
             center as distance decreases, by sin(pi*r/(2R))^-amount. */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) ResamplePixelColor(resample_filter[id],(double)
            (factor*delta.x/scale.x+center.x),(double) (factor*delta.y/
            scale.y+center.y),&pixel);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  resample_filter=DestroyResampleFilterThreadSet(resample_filter);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const unsigned long number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphImages() transforms each frame of the sequence into the next through
  'number_frames' intermediate frames, each a size- and color-blended mix of
  the two neighbors.  A single-image list simply gets cloned frames.
  NULL is returned on failure.

  Fix: the SetImageStorageClass() failure path now destroys the accumulated
  morph_images list (previously only morph_image was destroyed, leaking the
  list), consistent with every other failure path in this function.
*/
MagickExport Image *MorphImages(const Image *image,
  const unsigned long number_frames,ExceptionInfo *exception)
{
#define MorphImageTag  "Morph/Image"
  Image
    *morph_image,
    *morph_images;
  long
    y;
  MagickOffsetType
    scene;
  MagickRealType
    alpha,
    beta;
  register const Image
    *next;
  register long
    i;
  MagickBooleanType
    status;
  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: just replicate it.
      */
      for (i=1; i < (long) number_frames; i++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
            (QuantumTick(i,number_frames) != MagickFalse))
          {
            status=image->progress_monitor(MorphImageTag,i,number_frames,
              image->client_data);
            if (status == MagickFalse)
              break;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  status=MagickTrue;
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (i=0; i < (long) number_frames; i++)
    {
      ViewInfo
        *image_view,
        *morph_view;
      /* alpha weights the current frame, beta the next frame. */
      beta=(MagickRealType) (i+1.0)/(MagickRealType) (number_frames+1.0);
      alpha=1.0-beta;
      /* Intermediate frame size interpolates between the two neighbors. */
      morph_image=ZoomImage(next,(unsigned long) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(unsigned long) (alpha*
        next->rows+beta*GetNextImageInList(next)->rows+0.5),exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* Scale the next frame to the intermediate size for blending. */
      morph_image=ZoomImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireCacheView(morph_image);
      morph_view=AcquireCacheView(morph_images);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (long) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;
        register const PixelPacket
          *p;
        register long
          x;
        register PixelPacket
          *q;
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (long) morph_images->columns; x++)
        {
          /* Blend: q already holds the zoomed current frame. */
          q->red=RoundToQuantum(alpha*q->red+beta*p->red);
          q->green=RoundToQuantum(alpha*q->green+beta*p->green);
          q->blue=RoundToQuantum(alpha*q->blue+beta*p->blue);
          q->opacity=RoundToQuantum(alpha*q->opacity+beta*p->opacity);
          p++;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (i < (long) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /* An early break above means the sequence was not fully morphed. */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;
  long
    quantum;
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;
  unsigned long
    height;
  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Border thickness: 1/25th of the longest image dimension, but never less
    than 10 pixels.
  */
  quantum=(long) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];
      DrawInfo
        *annotate_info;
      long
        count;
      MagickBooleanType
        status;
      TypeMetric
        metrics;
      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      /*
        Word-wrap the caption, then grow the 1-row caption image to hold all
        wrapped lines (count is the number of line breaks inserted).
      */
      count=FormatMagickCaption(caption_image,annotate_info,caption,&metrics);
      status=SetImageExtent(caption_image,image->columns,(unsigned long)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatMagickString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          /* Reserve room for the caption below the picture. */
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /*
    Compose the picture: the original image centered on a border-colored
    backing, with the caption (if any) composited beneath it.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(long) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Bend the picture: rotate 90 degrees, apply a gentle wave, rotate back.
    Each step destroys the previous picture_image and replaces it;
    DestroyImage() returns NULL, so a failed step propagates NULL out.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, flop it to the expected side, then composite the
    picture over its shadow.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returned NULL */
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returned NULL */
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (long) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /*
    Rotate the framed picture by the requested angle and trim the excess
    transparent border introduced by the rotation.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RecolorImage() translate, scale, shear, or rotate image colors. Although
% you can use variable sized matrices, typically you use a 5 x 5 for an RGBA
% image and a 6x6 for CMYKA. Populate the last row with normalized values to
% translate.
%
% The format of the RecolorImage method is:
%
% Image *RecolorImage(const Image *image,const unsigned long order,
% const double *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o order: the number of columns and rows in the recolor matrix.
%
% o color_matrix: An array of double representing the recolor matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const unsigned long order,
  const double *color_matrix,ExceptionInfo *exception)
{
#define RecolorImageTag "Recolor/Image"
  Image
    *recolor_image;
  long
    progress,
    y;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  register const double
    *k;
  ViewInfo
    *image_view,
    *recolor_view;
  /*
    Initialize image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  recolor_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (recolor_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(recolor_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&recolor_image->exception);
      recolor_image=DestroyImage(recolor_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;
      long
        u,
        v;
      /*
        Log the order x order recolor matrix, one row per log line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " Recolor image with %ldx%ld color matrix:",order,order);
      message=AcquireString("");
      k=color_matrix;
      for (v=0; v < (long) order; v++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%ld: ",v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (long) order; u++)
        {
          (void) FormatMagickString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Recolor image: apply the matrix to each pixel's channel vector.  Rows
    are processed in parallel; any row failure sets status and the
    remaining iterations bail out early.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  k=color_matrix;
  image_view=AcquireCacheView(image);
  recolor_view=AcquireCacheView(recolor_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickPixelPacket
      pixel,
      recolor_pixel;
    register const IndexPacket
      *indexes;
    register const PixelPacket
      *p;
    register long
      x;
    register IndexPacket
      *recolor_indexes;
    register PixelPacket
      *q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(recolor_view,0,y,recolor_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      NOTE(review): image_view was read with GetCacheViewVirtualPixels
      above; confirm the Authentic index queue is intended here rather
      than the Virtual variant.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    recolor_indexes=GetCacheViewAuthenticIndexQueue(recolor_view);
    /* Zero-initialize so channels untouched by the switch stay defined. */
    pixel=zero;
    recolor_pixel=zero;
    for (x=0; x < (long) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes,&pixel);
      SetMagickPixelPacket(recolor_image,p,indexes,&recolor_pixel);
      /*
        Matrix size selects the channel set: 1-3 map RGB subsets, 4 is a
        4x4 RGB matrix with translation column, 5 is 5x5 RGBA with
        translation, default (6x6) handles CMYKA.  k is indexed row-major
        into the order x order matrix.
      */
      switch (order)
      {
        case 0:
          break;
        case 1:
        {
          recolor_pixel.red=k[0]*pixel.red;
          break;
        }
        case 2:
        {
          recolor_pixel.red=k[0]*pixel.red+k[1]*pixel.green;
          recolor_pixel.green=k[2]*pixel.red+k[3]*pixel.green;
          break;
        }
        case 3:
        {
          recolor_pixel.red=k[0]*pixel.red+k[1]*pixel.green+k[2]*pixel.blue;
          recolor_pixel.green=k[3]*pixel.red+k[4]*pixel.green+k[5]*pixel.blue;
          recolor_pixel.blue=k[6]*pixel.red+k[7]*pixel.green+k[8]*pixel.blue;
          break;
        }
        case 4:
        {
          recolor_pixel.red=k[0]*pixel.red+k[1]*pixel.green+k[2]*pixel.blue+
            k[12]*QuantumRange;
          recolor_pixel.green=k[4]*pixel.red+k[5]*pixel.green+k[6]*pixel.blue+
            k[13]*QuantumRange;
          recolor_pixel.blue=k[8]*pixel.red+k[9]*pixel.green+k[10]*pixel.blue+
            k[14]*QuantumRange;
          break;
        }
        case 5:
        {
          /* Alpha is handled as (QuantumRange-opacity) on input/output. */
          recolor_pixel.red=k[0]*pixel.red+k[1]*pixel.green+k[2]*pixel.blue+
            k[3]*(QuantumRange-pixel.opacity)+k[20]*QuantumRange;
          recolor_pixel.green=k[5]*pixel.red+k[6]*pixel.green+k[7]*pixel.blue+
            k[8]*(QuantumRange-pixel.opacity)+k[21]*QuantumRange;
          recolor_pixel.blue=k[10]*pixel.red+k[11]*pixel.green+k[12]*pixel.blue+
            k[13]*(QuantumRange-pixel.opacity)+k[22]*QuantumRange;
          recolor_pixel.opacity=(MagickRealType) QuantumRange-(k[15]*pixel.red+
            k[16]*pixel.green+k[17]*pixel.blue+k[18]*(QuantumRange-
            pixel.opacity)+k[23]*QuantumRange);
          break;
        }
        default:
        {
          /* 6x6 CMYKA: includes the black (index) channel. */
          recolor_pixel.red=k[0]*pixel.red+k[1]*pixel.green+k[2]*pixel.blue+
            k[3]*pixel.index+k[4]*((Quantum) QuantumRange-pixel.opacity)+
            k[30]*QuantumRange;
          recolor_pixel.green=k[6]*pixel.red+k[7]*pixel.green+k[8]*pixel.blue+
            k[9]*pixel.index+k[10]*((Quantum) QuantumRange-pixel.opacity)+
            k[31]*QuantumRange;
          recolor_pixel.blue=k[12]*pixel.red+k[13]*pixel.green+k[14]*pixel.blue+
            k[15]*pixel.index+k[16]*((Quantum) QuantumRange-pixel.opacity)+
            k[32]*QuantumRange;
          if (image->colorspace == CMYKColorspace)
            recolor_pixel.index=k[18]*pixel.red+k[19]*pixel.green+k[20]*
              pixel.blue+k[21]*pixel.index+k[22]*((Quantum) QuantumRange-
              pixel.opacity)+k[33]*QuantumRange;
          recolor_pixel.opacity=(MagickRealType) QuantumRange-(k[24]*pixel.red+
            k[25]*pixel.green+k[26]*pixel.blue+k[27]*pixel.index+k[28]*
            (QuantumRange-pixel.opacity)+k[34]*QuantumRange);
          break;
        }
      }
      /* Clamp each result back into the quantum range. */
      q->red=RoundToQuantum(recolor_pixel.red);
      q->green=RoundToQuantum(recolor_pixel.green);
      q->blue=RoundToQuantum(recolor_pixel.blue);
      q->opacity=RoundToQuantum(recolor_pixel.opacity);
      if (image->colorspace == CMYKColorspace)
        recolor_indexes[x]=RoundToQuantum(recolor_pixel.index);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(recolor_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
/* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,RecolorImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  recolor_view=DestroyCacheView(recolor_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    recolor_image=DestroyImage(recolor_image);
  return(recolor_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
  Image
    *sepia_image;
  long
    progress,
    y;
  MagickBooleanType
    status;
  ViewInfo
    *image_view,
    *sepia_view;
  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.  Rows are processed in parallel; any row
    failure sets status and remaining iterations bail out early.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  sepia_view=AcquireCacheView(sepia_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register const PixelPacket
      *p;
    register long
      x;
    register PixelPacket
      *q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) image->columns; x++)
    {
      MagickRealType
        intensity,
        tone;
      /*
        Map pixel intensity to a sepia tone: red saturates above the
        threshold, green above 7/6 of it, blue is shifted down by a sixth
        of the threshold, and green/blue are floored at threshold/7 to
        keep the brownish cast.
      */
      intensity=(MagickRealType) PixelIntensityToQuantum(p);
      tone=intensity > threshold ? (MagickRealType) QuantumRange : intensity+
        (MagickRealType) QuantumRange-threshold;
      q->red=RoundToQuantum(tone);
      tone=intensity > (7.0*threshold/6.0) ? (MagickRealType) QuantumRange :
        intensity+(MagickRealType) QuantumRange-7.0*threshold/6.0;
      q->green=RoundToQuantum(tone);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      q->blue=RoundToQuantum(tone);
      tone=threshold/7.0;
      if ((MagickRealType) q->green < tone)
        q->green=RoundToQuantum(tone);
      if ((MagickRealType) q->blue < tone)
        q->blue=RoundToQuantum(tone);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
/* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Stretch and boost contrast so the toned result spans the full range. */
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const long x_offset,const long y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const long x_offset,const long y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
  Image
    *border_image,
    *clone_image,
    *shadow_image;
  long
    progress,
    y;
  MagickBooleanType
    status;
  RectangleInfo
    border_info;
  ViewInfo
    *image_view;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Surround a clone of the image with a transparent border wide enough
    (2*sigma) for the subsequent blur to bleed into.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  border_info.width=(unsigned long) (2.0*sigma+0.5);
  border_info.height=(unsigned long) (2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten every pixel to the background color, scaling the
    existing alpha by the requested opacity percentage.  Rows are
    processed in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(border_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) border_image->rows; y++)
  {
    register long
      x;
    register PixelPacket
      *q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) border_image->columns; x++)
    {
      q->red=border_image->background_color.red;
      q->green=border_image->background_color.green;
      q->blue=border_image->background_color.blue;
      if (border_image->matte == MagickFalse)
        q->opacity=border_image->background_color.opacity;
      else
        q->opacity=RoundToQuantum((MagickRealType) (QuantumRange-(QuantumRange-
          q->opacity)*opacity/100.0));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
/* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Soften the silhouette by blurring only the alpha channel. */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Offset the shadow's page geometry so it lands x_offset/y_offset from
    the original, compensating for the border added above.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(long) border_info.width;
  shadow_image->page.height+=y_offset-(long) border_info.height;
  shadow_image->page.x+=x_offset-(long) border_info.width;
  shadow_image->page.y+=y_offset-(long) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;
  long
    y;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  ViewInfo
    *random_view;
  /*
    Sketch image: fill a double-size clone with gray noise, motion-blur it
    along the given angle, turn the result into a dodge layer, and blend
    it with the original.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  random_view=AcquireCacheView(random_image);
  for (y=0; y < (long) random_image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    register IndexPacket
      *indexes;
    register long
      x;
    register PixelPacket
      *q;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (long) random_image->columns; x++)
    {
      /*
        Gray noise: one random value copied to all channels (assumes
        GetPseudoRandomValue() returns [0,1) -- TODO confirm).
      */
      pixel.red=(MagickRealType) (QuantumRange*GetPseudoRandomValue());
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  random_view=DestroyCacheView(random_view);
  if (status == MagickFalse)
    {
      random_image=DestroyImage(random_image);
      return(random_image);  /* DestroyImage() returned NULL */
    }
  /* Streak the noise along the sketch angle. */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Build the dodge layer: edge-detect, normalize, invert, and shrink
    back to the original size (50%).
  */
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  /* Blend 20% of the original color back in (80% sketch). */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  blend_image->geometry=AcquireString("20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
#define SolarizeImageTag "Solarize/Image"
  ExceptionInfo
    *exception;
  long
    progress,
    y;
  MagickBooleanType
    status;
  ViewInfo
    *image_view;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register long
        i;
      /*
        Solarize colormap: invert any channel brighter than the threshold
        (the raster loop below keeps the pixel data consistent).
      */
      for (i=0; i < (long) image->colors; i++)
      {
        if ((MagickRealType) image->colormap[i].red > threshold)
          image->colormap[i].red=(Quantum) QuantumRange-image->colormap[i].red;
        if ((MagickRealType) image->colormap[i].green > threshold)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((MagickRealType) image->colormap[i].blue > threshold)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert each channel that exceeds the threshold,
    in place.  Rows are processed in parallel.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register long
      x;
    register PixelPacket
      *q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) image->columns; x++)
    {
      if ((MagickRealType) q->red > threshold)
        q->red=(Quantum) QuantumRange-q->red;
      if ((MagickRealType) q->green > threshold)
        q->green=(Quantum) QuantumRange-q->green;
      if ((MagickRealType) q->blue > threshold)
        q->blue=(Quantum) QuantumRange-q->blue;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
/* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((unsigned long) (alpha) >> (unsigned long) \
  (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) ? (unsigned long) (alpha) \
  | (1UL << (unsigned long) (i)) : (unsigned long) (alpha) & \
  ~(1UL << (unsigned long) (i)))
#define SteganoImageTag "Stegano/Image"
  Image
    *stegano_image;
  int
    c;
  long
    i,
    j,
    k,
    y;
  MagickBooleanType
    status;
  PixelPacket
    pixel;
  register long
    x;
  register PixelPacket
    *q;
  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.  Bit i of each watermark
    pixel's intensity is written to bit j of successive red/green/blue
    channels (c cycles 0..2) at linear pixel offset k, which starts at
    image->offset and wraps over the image's pixel count; j advances each
    time k wraps back to its starting offset.
  */
  c=0;
  i=0;
  j=0;
  k=image->offset;
  for (i=MAGICKCORE_QUANTUM_DEPTH-1; (i >= 0) && (j < MAGICKCORE_QUANTUM_DEPTH); i--)
  {
    for (y=0; (y < (long) watermark->rows) && (j < MAGICKCORE_QUANTUM_DEPTH); y++)
    {
      for (x=0; (x < (long) watermark->columns) && (j < MAGICKCORE_QUANTUM_DEPTH); x++)
      {
        (void) GetOneVirtualPixel(watermark,x,y,&pixel,exception);
        /* k is decomposed as column = k % columns, row = k / columns. */
        q=GetAuthenticPixels(stegano_image,k % (long) stegano_image->columns,
          k/(long) stegano_image->columns,1,1,exception);
        if (q == (PixelPacket *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetBit(q->red,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
          case 1:
          {
            SetBit(q->green,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
          case 2:
          {
            SetBit(q->blue,j,GetBit(PixelIntensityToQuantum(&pixel),i));
            break;
          }
        }
        if (SyncAuthenticPixels(stegano_image,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap k over the full pixel count (columns*rows).  The previous
          bound of columns*columns was wrong for non-square images: it
          truncated tall images and indexed past the pixel area of wide
          ones.
        */
        if (k == (long) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == image->offset)
          j++;
      }
    }
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(MAGICKCORE_QUANTUM_DEPTH-i,MAGICKCORE_QUANTUM_DEPTH) != MagickFalse))
      {
        status=image->progress_monitor(SteganoImageTag,
          MAGICKCORE_QUANTUM_DEPTH-i,MAGICKCORE_QUANTUM_DEPTH,
          image->client_data);
        if (status == MagickFalse)
          break;
      }
  }
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const long x_offset,const long y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
/*
  StereoImage() is a convenience wrapper around StereoAnaglyphImage():
  it composes the stereo pair with no offset between the left and right
  images.
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  Image
    *stereo_image;
  stereo_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(stereo_image);
}
/*
  StereoAnaglyphImage(): combine a left/right stereo pair into a single
  anaglyph image: the red channel is taken from the (offset) left image,
  the green and blue channels from the right image, and opacity is the
  average of the two.  x_offset/y_offset shift where the left image is
  sampled relative to the right image.  Returns NULL on failure; errors
  are reported through `exception`.
*/
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const long x_offset,const long y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  long
    y;

  MagickBooleanType
    status;

  register const PixelPacket
    *p,
    *q;

  register long
    x;

  register PixelPacket
    *r;

  /*
    Validate arguments (fix: the original asserted right_image != NULL
    twice; the redundant second assert was removed).
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  /*
    Copy the left image to the red channel and the right image to the
    green and blue channels, row by row.
  */
  for (y=0; y < (long) stereo_image->rows; y++)
  {
    /* negative offsets: sample the left image shifted by (x_offset,y_offset) */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    /* fix: compare the const pixel pointers against a const-qualified NULL */
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (long) stereo_image->columns; x++)
    {
      r->red=p->red;
      r->green=q->green;
      r->blue=q->blue;
      r->opacity=(Quantum) ((p->opacity+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
        (QuantumTick(y,image->rows) != MagickFalse))
      {
        status=image->progress_monitor(StereoImageTag,y,stereo_image->rows,
          stereo_image->client_data);
        if (status == MagickFalse)
          break;
      }
  }
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SwirlImage(): rotate pixels about the image center; the rotation angle
  decays quadratically with distance from the center so the effect is
  strongest in the middle and zero at `radius`.  Pixels outside the
  bounding ellipse keep their cloned values.  Returns NULL on failure.
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
Image
*swirl_image;
long
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
radius;
PointInfo
center,
scale;
ResampleFilter
**resample_filter;
ViewInfo
*image_view,
*swirl_view;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
swirl_image=CloneImage(image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
{
InheritException(exception,&swirl_image->exception);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.opacity != OpaqueOpacity)
swirl_image->matte=MagickTrue;
/*
Compute scaling factor: scale the shorter axis so the swirl region is an
ellipse inscribed in the image rectangle.
*/
center.x=(double) image->columns/2.0;
center.y=(double) image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (image->columns > image->rows)
scale.y=(double) image->columns/(double) image->rows;
else
if (image->columns < image->rows)
scale.x=(double) image->rows/(double) image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.  One resample filter per cache thread so rows can be
processed in parallel.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(swirl_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,MagickTrue,exception);
image_view=AcquireCacheView(image);
swirl_view=AcquireCacheView(swirl_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
MagickPixelPacket
pixel;
MagickRealType
distance;
PointInfo
delta;
register IndexPacket
*swirl_indexes;
register PixelPacket
*q;
register long
id,
x;
/* another row already failed; skip remaining work */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
delta.y=scale.y*(double) (y-center.y);
pixel=zero;
id=GetPixelCacheThreadId();
for (x=0; x < (long) image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance < (radius*radius))
{
MagickRealType
cosine,
factor,
sine;
/*
Swirl the pixel: rotation angle = degrees*factor^2, where factor
falls from 1 at the center to 0 at the ellipse edge.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
(void) ResamplePixelColor(resample_filter[id],(double) ((cosine*
delta.x-sine*delta.y)/scale.x+center.x),(double) ((sine*delta.x+
cosine*delta.y)/scale.y+center.y),&pixel);
SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
}
q++;
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the parallel rows */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
image_view=DestroyCacheView(image_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TintImage(): blend the `tint` color into each pixel, weighted per channel
  by f(w)=1-4*(w-0.5)^2 where w is the normalized channel value — so the
  tint is strongest in the midtones and zero at black and white.  `opacity`
  is a geometry string giving per-channel tint percentages (rho=red,
  sigma=green, xi=blue, psi=opacity); if NULL the clone is returned
  untinted.  Returns NULL on failure.
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
GeometryInfo
geometry_info;
Image
*tint_image;
long
progress,
y;
MagickBooleanType
status;
MagickStatusType
flags;
MagickPixelPacket
color_vector,
pixel;
ViewInfo
*image_view,
*tint_view;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
{
InheritException(exception,&tint_image->exception);
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if (opacity == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.  Channels not present in the geometry
string default to the red (rho) percentage.
*/
flags=ParseGeometry(opacity,&geometry_info);
pixel.red=geometry_info.rho;
if ((flags & SigmaValue) != 0)
pixel.green=geometry_info.sigma;
else
pixel.green=pixel.red;
if ((flags & XiValue) != 0)
pixel.blue=geometry_info.xi;
else
pixel.blue=pixel.red;
if ((flags & PsiValue) != 0)
pixel.opacity=geometry_info.psi;
else
pixel.opacity=(MagickRealType) OpaqueOpacity;
/* per-channel tint strength, centered on the tint color's intensity */
color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
PixelIntensity(&tint));
color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
PixelIntensity(&tint));
color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
PixelIntensity(&tint));
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
tint_view=AcquireCacheView(tint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
register const PixelPacket
*p;
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (long) image->columns; x++)
{
/* NOTE: this inner `pixel` intentionally shadows the outer one used
only for parsing above */
MagickPixelPacket
pixel;
MagickRealType
weight;
weight=QuantumScale*p->red-0.5;
pixel.red=(MagickRealType) p->red+color_vector.red*(1.0-(4.0*
(weight*weight)));
q->red=RoundToQuantum(pixel.red);
weight=QuantumScale*p->green-0.5;
pixel.green=(MagickRealType) p->green+color_vector.green*(1.0-(4.0*
(weight*weight)));
q->green=RoundToQuantum(pixel.green);
weight=QuantumScale*p->blue-0.5;
pixel.blue=(MagickRealType) p->blue+color_vector.blue*(1.0-(4.0*
(weight*weight)));
q->blue=RoundToQuantum(pixel.blue);
q->opacity=p->opacity;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the parallel rows */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const long x,const long y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  VignetteImage(): soften the image edges in vignette style.  Pipeline:
  (1) clone the image onto a matte canvas; (2) draw a white ellipse on a
  black oval mask; (3) Gaussian-blur the mask; (4) composite the blurred
  mask into the canvas's opacity channel; (5) flatten against the
  background color.  Each step destroys its intermediates on failure and
  returns NULL.
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const long x,const long y,ExceptionInfo *exception)
{
char
ellipse[MaxTextExtent];
DrawInfo
*draw_info;
Image
*canvas_image,
*blur_image,
*oval_image,
*vignette_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
{
InheritException(exception,&canvas_image->exception);
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
canvas_image->matte=MagickTrue;
/* black background = fully masked outside the ellipse */
oval_image=CloneImage(canvas_image,canvas_image->columns,
canvas_image->rows,MagickTrue,exception);
if (oval_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
(void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
(void) SetImageBackgroundColor(oval_image);
/* white filled ellipse centered on the image, shrunk by (x,y) */
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
(void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
(void) FormatMagickString(ellipse,MaxTextExtent,
"ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,image->rows/2.0,
image->columns/2.0-x,image->rows/2.0-y);
draw_info->primitive=AcquireString(ellipse);
(void) DrawImage(oval_image,draw_info);
draw_info=DestroyDrawInfo(draw_info);
/* soften the mask edge; this is what creates the vignette falloff */
blur_image=BlurImage(oval_image,radius,sigma,exception);
oval_image=DestroyImage(oval_image);
if (blur_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
blur_image->matte=MagickFalse;
(void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
blur_image=DestroyImage(blur_image);
vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
canvas_image=DestroyImage(canvas_image);
return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WaveImage(): shift pixel columns vertically along a sine wave of the
  given amplitude and wave length.  The result image is taller than the
  source by 2*|amplitude| to make room for the displacement; each output
  pixel (x,y) is resampled from source position (x, y - sine_map[x]).
  Returns NULL on failure.
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
Image
*wave_image;
long
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
zero;
MagickRealType
*sine_map;
register long
i;
ResampleFilter
**resample_filter;
ViewInfo
*wave_view;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
wave_image=CloneImage(image,image->columns,(unsigned long) (image->rows+2.0*
fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
{
InheritException(exception,&wave_image->exception);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
if (wave_image->background_color.opacity != OpaqueOpacity)
wave_image->matte=MagickTrue;
/*
Allocate sine map: per-column vertical displacement, offset by
|amplitude| so displacements are non-negative.
*/
sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (MagickRealType *) NULL)
{
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (long) wave_image->columns; i++)
sine_map[i]=fabs(amplitude)+amplitude*sin((2*MagickPI*i)/wave_length);
/*
Wave image.  One resample filter per cache thread so rows can be
processed in parallel.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(wave_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,MagickTrue,exception);
wave_view=AcquireCacheView(wave_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) wave_image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*indexes;
register long
id,
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(wave_view);
pixel=zero;
id=GetPixelCacheThreadId();
/* samples falling outside the source image read the background color */
(void) SetResampleFilterVirtualPixelMethod(resample_filter[id],
BackgroundVirtualPixelMethod);
for (x=0; x < (long) wave_image->columns; x++)
{
(void) ResamplePixelColor(resample_filter[id],(double) x,(double) (y-
sine_map[x]),&pixel);
SetPixelPacket(wave_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the parallel rows.
NOTE(review): the loop runs over wave_image->rows but progress is
reported against image->rows, which is smaller — confirm intended. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
|
producer-consumer-with-linkedlist.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Singly linked list node used by the producer/consumer demo. */
struct Node{
int data;
struct Node *next;
};
/* Shared list head; protected by the omp critical sections in main(). */
struct Node* head = NULL;
/*
 * Append new_data at the tail of the list.
 * Fix: the original dereferenced the result of malloc without checking it;
 * on allocation failure we now report and exit instead of crashing.
 */
void insertLast(struct Node** head_ref, int new_data){
    struct Node* new_node = malloc(sizeof *new_node);  /* no cast needed in C */
    if (new_node == NULL) {
        fprintf(stderr, "insertLast: out of memory\n");
        exit(EXIT_FAILURE);
    }
    new_node->data = new_data;
    new_node->next = NULL;
    /* empty list: the new node becomes the head */
    if (*head_ref == NULL){
        *head_ref = new_node;
        return;
    }
    /* otherwise walk to the tail and link the new node there */
    struct Node *last = *head_ref;
    while (last->next != NULL)
        last = last->next;
    last->next = new_node;
}
/*
 * Unlink and return the first node of the list, advancing *head_ref.
 * Returns NULL (after printing a notice) when the list is empty.
 * Ownership of the returned node passes to the caller, who must free it.
 */
struct Node* deleteFirst(struct Node** head_ref) {
    struct Node *first = *head_ref;
    if (first == NULL) {
        printf("List is empty!");
        return NULL;
    }
    *head_ref = first->next;
    return first;
}
/*
 * Print the list as "a -> b -> ... -> NULL" (no trailing newline).
 * Does not modify the list.
 */
void printList(struct Node *node){
    for (struct Node *cur = node; cur != NULL; cur = cur->next)
        printf("%d -> ", cur->data);
    printf("NULL");
}
// Append `el` at the tail of the shared list and announce it.
// NOTE(review): no locking here; callers (main) wrap this in an
// omp critical section — confirm all future callers do the same.
void produce(int el){
insertLast(&head, el);
printf("\nProduced %d\n", el);
}
/*
 * Remove the oldest element from the shared list, report it, and free it.
 * Fix: the original dereferenced the result of deleteFirst() without a
 * NULL check, crashing whenever the consumer ran on an empty list
 * (deleteFirst already prints "List is empty!" in that case).
 * No locking here; callers wrap this in an omp critical section.
 */
void consume(){
    struct Node *temp = deleteFirst(&head);
    if (temp == NULL)
        return;
    printf("\nConsumed %d\n", temp->data);
    free(temp);
}
/*
 * Two-thread producer/consumer driver: thread 0 produces increasing
 * integers, thread 1 consumes them; each step prints the list and waits
 * for a keypress.  All list access is serialized by omp critical.
 * Fix: `id` was declared outside the parallel region and therefore
 * shared — both threads wrote it concurrently (a data race that could
 * make both threads take the same branch).  It is now declared inside
 * the region so each thread has its own copy.
 */
int main(){
    int el = 1;  /* only written by thread 0, inside the critical section */
    #pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();  /* private per thread */
        if (id == 0) {
            while (1) {
                #pragma omp critical
                {
                    produce(el);
                    el++;
                    printf("List: ");
                    printList(head);
                    fgetc(stdin);
                }
            }
        } else {
            while (1) {
                #pragma omp critical
                {
                    consume();
                    printf("List: ");
                    printList(head);
                    fgetc(stdin);
                }
            }
        }
    }
    return 0;
}
kernel_cpu_2.c | // #ifdef __cplusplus
// extern "C" {
// #endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#ifdef _OPENMP
#include <omp.h>
#endif // (in directory known to compiler)
#include <stdlib.h> // (in directory known to compiler)
#include <stdio.h>
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in directory provided here)
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/timer/timer.h" // (in directory provided here) needed by timer
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_cpu_2.h" // (in directory provided here)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
/*
 * kernel_cpu_2: OpenMP B+tree range-query kernel.
 * For each of `count` queries (indexed by bid), descends `maxheight`
 * levels of the tree to locate the leaf nodes bracketing [start,end],
 * then scans the leaves to fill recstart[bid] (first record index) and
 * reclength[bid] (record count).  Queries are independent, so the outer
 * loop is parallelized; timing for setup and kernel phases is printed.
 */
void kernel_cpu_2(int cores_arg,
                  knode *knodes, long knodes_elem,
                  int order, long maxheight, int count,
                  long *currKnode, long *offset, long *lastKnode,
                  long *offset_2, int *start, int *end, int *recstart,
                  int *reclength) {
  // timer
  long long time0;
  long long time1;
  long long time2;
  // common variables
  int i;
  time0 = get_time();
  //====================================================================
  // MCPU SETUP
  //====================================================================
  int max_nthreads = 0;
#ifdef _OPENMP
  max_nthreads = omp_get_max_threads();
  // printf("max # of threads = %d\n", max_nthreads);
  omp_set_num_threads(cores_arg);
  // printf("set # of threads = %d\n", cores_arg);
#endif
  (void) max_nthreads;  /* only used by the debug printf above */
  int threadsPerBlock;
  threadsPerBlock = order < 1024 ? order : 1024;  /* mirrors the GPU block size */
  time1 = get_time();
  //====================================================================
  // PROCESS INTERACTIONS
  //====================================================================
  // private per-thread loop indices
  int thid;
  int bid;
  // process the queries in parallel; each query (bid) touches only its
  // own slots of the per-query arrays
#pragma omp parallel for private(i, thid)
  for (bid = 0; bid < count; bid++) {
    // process levels of the tree
    for (i = 0; i < maxheight; i++) {
      // process all keys of the current node at this level
      for (thid = 0; thid < threadsPerBlock; thid++) {
        if ((knodes[currKnode[bid]].keys[thid] <= start[bid]) &&
            (knodes[currKnode[bid]].keys[thid + 1] > start[bid])) {
          // guard inserted to avoid a crash due to a bug in the original
          // code: "offset[bid]" computed below can address knodes out of
          // bounds (values saved into knodes->indices in main can exceed
          // the size of the knodes array), causing a segmentation fault
          if (knodes[currKnode[bid]].indices[thid] < knodes_elem) {
            offset[bid] = knodes[currKnode[bid]].indices[thid];
          }
        }
        if ((knodes[lastKnode[bid]].keys[thid] <= end[bid]) &&
            (knodes[lastKnode[bid]].keys[thid + 1] > end[bid])) {
          // same out-of-bounds guard, for the upper end of the range
          if (knodes[lastKnode[bid]].indices[thid] < knodes_elem) {
            offset_2[bid] = knodes[lastKnode[bid]].indices[thid];
          }
        }
      }
      // descend to the next tree level
      currKnode[bid] = offset[bid];
      lastKnode[bid] = offset_2[bid];
    }
    // process leaves: find the index of the starting record
    for (thid = 0; thid < threadsPerBlock; thid++) {
      if (knodes[currKnode[bid]].keys[thid] == start[bid]) {
        recstart[bid] = knodes[currKnode[bid]].indices[thid];
      }
    }
    // process leaves: find the index of the ending record
    for (thid = 0; thid < threadsPerBlock; thid++) {
      if (knodes[lastKnode[bid]].keys[thid] == end[bid]) {
        reclength[bid] =
            knodes[lastKnode[bid]].indices[thid] - recstart[bid] + 1;
      }
    }
  }
  time2 = get_time();
  //====================================================================
  // DISPLAY TIMING
  //====================================================================
  printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
  /* fix: the original used a bare "% " which is an invalid printf
     conversion specification (undefined behavior); a literal percent
     sign must be written as "%%" */
  printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",
         (float)(time1 - time0) / 1000000,
         (float)(time1 - time0) / (float)(time2 - time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",
         (float)(time2 - time1) / 1000000,
         (float)(time2 - time1) / (float)(time2 - time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float)(time2 - time0) / 1000000);
} // main
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
// #ifdef __cplusplus
// }
// #endif
|
GB_unop__lnot_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_fp32_fp32)
// op(A') function: GB (_unop_tran__lnot_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot (Ax [p]) for all entries: logical-NOT of float values,
// producing 1.0f when the input is zero and 0.0f otherwise.  Generated
// code — the structure mirrors every other GB_unop apply kernel.
GrB_Info GB (_unop_apply__lnot_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = !(z != 0) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A and apply the logical-NOT operator.  The
// actual transpose loop lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.  Generated code.
GrB_Info GB (_unop_tran__lnot_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
dot_product_tiled.c | /*
* OpenMP implementation of dot product calculation.
* This program is used as the driving example in demos in the module Heterogeneous Programming with OpenMP
*
* @author Apan Qasem
*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include <omp.h>
#define REPS 100
double t0;
/*
 * Return wall-clock time in seconds (microsecond resolution) as a double.
 * Fixes: the original stored gettimeofday's return value in an `int i`
 * that was never read, and passed a deprecated `struct timezone` it never
 * used; we now pass NULL (per POSIX) and return 0.0 on the (practically
 * impossible) failure path instead of reading an unset timeval.
 */
double mysecond() {
    struct timeval tp;
    if (gettimeofday(&tp, NULL) != 0)
        return 0.0;
    return (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6;
}
/*
 * Tiled OpenMP dot product: processes the vectors in tiles of 1000
 * elements, repeating each tile 100 times to inflate the workload.
 * Usage: prog <vector size> <num threads>
 * Fixes: argv was read without checking argc (crash when run with no
 * arguments); mallocs were unchecked; the inner loop read a[i]/b[i] past
 * the end of the arrays whenever M was not a multiple of the tile size
 * (out-of-bounds UB); the buffers were never freed.
 */
int main(int argc, char *argv[]) {
  if (argc < 3) {
    fprintf(stderr, "usage: %s <vector size> <num threads>\n", argv[0]);
    return 1;
  }
  int M = atoi(argv[1]); // size of vectors
  int N = atoi(argv[2]); // number of OpenMP threads
  if (M <= 0 || N <= 0) {
    fprintf(stderr, "vector size and thread count must be positive\n");
    return 1;
  }
  float *a, *b;
  a = malloc(sizeof(float) * M);
  b = malloc(sizeof(float) * M);
  if (a == NULL || b == NULL) {
    fprintf(stderr, "allocation failed\n");
    free(a);
    free(b);
    return 1;
  }
  int i, j, k;
  for (i = 0; i < M; i++) {
    a[i] = i;
    b[i] = i + 3;
  }
  omp_set_num_threads(N);
  float sum = 0;
  t0 = mysecond();
  for (k = 0; k < M; k = k + 1000) {
    /* clamp the tile to M so the last (partial) tile stays in bounds */
    int tile_end = (k + 1000 < M) ? k + 1000 : M;
    for (j = 0; j < 100; j++) {
#pragma omp parallel for reduction(+:sum) schedule(static, 1024)
      for (i = k; i < tile_end; i++)
        sum += a[i] * b[i];
    }
  }
  t0 = (mysecond() - t0) * 1.e3;
  fprintf(stdout, "result = %1.3e\n", sum);
  fprintf(stdout, "parallel loop = %3.2f ms\n", t0);
  free(a);
  free(b);
  return 0;
}
|
task-taskgroup.c | /*
Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze
(joachim.protze@tu-dresden.de), Jonas Hahnfeld
(hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir
Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin
Schulz.
LLNL-CODE-773957
All rights reserved.
This file is part of Archer. For details, see
https://pruners.github.io/archer. Please also read
https://github.com/PRUNERS/archer/blob/master/LICENSE.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
// Archer race-test: the task's increment of `var` is ordered before the
// master thread's increment by the enclosing taskgroup, so there is no
// data race and the final value is deterministically 2.
int main(int argc, char* argv[])
{
int var = 0;
// Two threads so the non-master thread is free to steal and run the task.
#pragma omp parallel num_threads(2) shared(var)
#pragma omp master
{
#pragma omp taskgroup
{
#pragma omp task shared(var)
{
var++;
}
// Give other thread time to steal the task.
sleep(1);
} // taskgroup end waits for the task, ordering the two increments
var++;
}
fprintf(stderr, "DONE\n");
// Exit status 0 iff both increments took effect.
int error = (var != 2);
return error;
}
// CHECK: DONE
|
kmp_detach_tasks_t2.c | // RUN: %libomp-compile && env OMP_NUM_THREADS='3' %libomp-run
// RUN: %libomp-compile && env OMP_NUM_THREADS='1' %libomp-run
// REQUIRES: !abt
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
// detached tied
#define PTASK_FLAG_DETACHABLE 0x41
// OpenMP RTL interfaces
typedef unsigned long long kmp_uint64;
typedef long long kmp_int64;
// Source-location descriptor in the shape libomp expects (fields unused here).
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// Compiler-generated code (emulation)
typedef struct ident {
void* dummy; // not used in the library
} ident_t;
typedef enum kmp_event_type_t {
KMP_EVENT_UNINITIALIZED = 0,
KMP_EVENT_ALLOW_COMPLETION = 1
} kmp_event_type_t;
typedef struct {
kmp_event_type_t type;
union {
void *task;
} ed;
} kmp_event_t;
typedef struct shar { // shareds used in the task
} *pshareds;
// Mirror of the runtime task descriptor prefix; the field layout must match
// what __kmpc_omp_task_alloc returns, so do not reorder members.
typedef struct task {
pshareds shareds;
int(*routine)(int,struct task*);
int part_id;
// void *destructor_thunk; // optional, needs flag setting if provided
// int priority; // optional, needs flag setting if provided
// ------------------------------
// privates used in the task:
omp_event_handle_t evt;
} *ptask, kmp_task_t;
typedef int(* task_entry_t)( int, ptask );
#ifdef __cplusplus
extern "C" {
#endif
// Entry points exported by the OpenMP runtime (calls normally emitted by the compiler).
extern int __kmpc_global_thread_num(void *id_ref);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, task_entry_t rtn);
extern int __kmpc_omp_task(id *loc, int gtid, kmp_task_t *task);
extern omp_event_handle_t __kmpc_task_allow_completion_event(
ident_t *loc_ref, int gtid, kmp_task_t *task);
#ifdef __cplusplus
}
#endif
int volatile checker; // set to 1 by the task body; read by main to verify completion
// User's code, outlined into task entry
// Task body: sleeps so the event fulfillment in main happens before the body
// finishes, then records that it ran.  Signature is dictated by task_entry_t.
int task_entry(int gtid, ptask task) {
my_sleep(2.0);
checker = 1;
return 0;
}
// Allocates a detachable task directly through the libomp RTL entry points,
// attaches an allow-completion event, launches the task, fulfills the event,
// then uses taskwait so the task body has run before `checker` is examined.
int main() {
int i, j, gtid = __kmpc_global_thread_num(NULL);
int nt = omp_get_max_threads();
ptask task;
pshareds psh;
checker = 0;
omp_set_dynamic(0);
#pragma omp parallel //num_threads(N)
{
#pragma omp master
{
int gtid = __kmpc_global_thread_num(NULL);
omp_event_handle_t evt;
/*
#pragma omp task detach(evt)
{}
*/
// Emulates the code the compiler would emit for the commented-out
// `task detach(evt)` construct above.
task = (ptask)__kmpc_omp_task_alloc(NULL,gtid,PTASK_FLAG_DETACHABLE,
sizeof(struct task),sizeof(struct shar),&task_entry);
psh = task->shareds;
evt = (omp_event_handle_t)__kmpc_task_allow_completion_event(NULL,gtid,task);
task->evt = evt;
__kmpc_omp_task(NULL, gtid, task);
// Fulfilling the event before the body finishes is legal: a detachable
// task completes only once the body returns AND the event is fulfilled.
omp_fulfill_event(evt);
#pragma omp taskwait
;
// printf("after tw %d\n", omp_get_thread_num());
} // end master
} // end parallel
// check results
if (checker == 1) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
|
XT_MagElecDen.c | /* ============================================================================
* Copyright (c) 2013 K. Aditya Mohan (Purdue University)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of K. Aditya Mohan, Purdue
* University, nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
#include <stdio.h>
#include "XT_Structures.h"
#include "XT_Prior.h"
#include "XT_Debug.h"
#include "XT_DensityUpdate.h"
#include "allocate.h"
#include <math.h>
Real_t computeMagDensityCost(ScannedObject* ObjPtr, TomoInputs* InpPtr);
Real_t computeElecDensityCost(ScannedObject* ObjPtr, TomoInputs* InpPtr);
/* Reconstructs the 3-component magnetization field by steepest gradient
 * descent on the ADMM cost (quadratic potential-mismatch term plus QGGMRF
 * prior; see computeMagDensityCost).  Each iteration:
 *   1. computes the cost gradient and a step size (compute_mag_gradstep),
 *   2. takes a descent step on every voxel's magnetization vector,
 *   3. recomputes the potential residual ErrorPotMag used by the cost,
 *   4. stops early once the average relative update falls below
 *      InpPtr->DensUpdate_thresh (checked from iteration 2 onward).
 * A cost increase is reported to the debug log but does not abort. */
void reconstruct_magnetization (ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr)
{
	int32_t i, Iter, j, k;
	Real_t MagUpdate = 0, MagSum = 0, alpha_mag, cost, cost_old;
	Real_t ux, uy, uz, mx, my, mz;
	Real_arr_t ****grad_mag;

	/* grad_mag[z][y][x][c]: cost gradient w.r.t. magnetization component c. */
	grad_mag = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3);
	cost_old = computeMagDensityCost(ObjPtr, InpPtr);
	fprintf(InpPtr->debug_file_ptr, "------------ Mag Steep Grad Descent Iter = 0, cost = %e --------------\n", cost_old);
	for (Iter = 1; Iter < InpPtr->DensUpdate_MaxIter; Iter++)
	{
		compute_mag_gradstep(ObjPtr, InpPtr, fftptr, grad_mag, &alpha_mag);
		MagUpdate = 0; MagSum = 0;
		for (i = 0; i < ObjPtr->N_z; i++)
			for (j = 0; j < ObjPtr->N_y; j++)
				for (k = 0; k < ObjPtr->N_x; k++)
				{
					/* Per-component descent step.  x*x replaces pow(x,2): it is
					   exact for squaring and avoids a libm call per voxel. */
					ux = alpha_mag*grad_mag[i][j][k][0];
					uy = alpha_mag*grad_mag[i][j][k][1];
					uz = alpha_mag*grad_mag[i][j][k][2];
					ObjPtr->Magnetization[i][j][k][0] -= ux;
					ObjPtr->Magnetization[i][j][k][1] -= uy;
					ObjPtr->Magnetization[i][j][k][2] -= uz;
					mx = ObjPtr->Magnetization[i][j][k][0];
					my = ObjPtr->Magnetization[i][j][k][1];
					mz = ObjPtr->Magnetization[i][j][k][2];
					MagUpdate += sqrt(ux*ux + uy*uy + uz*uz);
					MagSum += sqrt(mx*mx + my*my + mz*mz);
				}
		/* Average update magnitude as a percentage of the total magnetization
		   magnitude; EPSILON_ERROR guards against division by zero. */
		MagUpdate = MagUpdate*100/(MagSum + EPSILON_ERROR);
		compute_magcrossprodtran (ObjPtr->Magnetization, ObjPtr->ErrorPotMag, ObjPtr->MagFilt, fftptr, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 1);
		for (i = 0; i < ObjPtr->N_z; i++)
			for (j = 0; j < ObjPtr->N_y; j++)
				for (k = 0; k < ObjPtr->N_x; k++)
				{
					/* Residual: target potential (primal minus dual) minus the
					   potential produced by the updated magnetization. */
					ObjPtr->ErrorPotMag[i][j][k][0] = ObjPtr->MagPotentials[i][j][k][0] - ObjPtr->MagPotDual[i][j][k][0] - ObjPtr->ErrorPotMag[i][j][k][0];
					ObjPtr->ErrorPotMag[i][j][k][1] = ObjPtr->MagPotentials[i][j][k][1] - ObjPtr->MagPotDual[i][j][k][1] - ObjPtr->ErrorPotMag[i][j][k][1];
					ObjPtr->ErrorPotMag[i][j][k][2] = ObjPtr->MagPotentials[i][j][k][2] - ObjPtr->MagPotDual[i][j][k][2] - ObjPtr->ErrorPotMag[i][j][k][2];
				}
		cost = computeMagDensityCost(ObjPtr, InpPtr);
		if (cost > cost_old)
			check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "ERROR: Cost increased when updating magnetization.\n");
		cost_old = cost;
		fprintf(InpPtr->debug_file_ptr, "------------ Mag Steep Grad Descent Iter = %d, cost = %e, Avg update as percentage = %e, sum = %e--------------\n", Iter, cost, MagUpdate, MagSum);
		if (Iter > 1 && MagUpdate < InpPtr->DensUpdate_thresh)
		{
			fprintf(InpPtr->debug_file_ptr, "******* Mag Steepest gradient descent algorithm has converged! *********\n");
			break;
		}
	}
	multifree(grad_mag, 4);
}
/* Computes the ADMM cost for the magnetization update:
   forward term = (ADMM_mu/2) * ||ErrorPotMag||^2 summed over all voxels and
   the three vector components, plus a QGGMRF prior over the forward half of
   the 26-voxel neighborhood so each neighbor pair is counted exactly once. */
Real_t computeMagDensityCost(ScannedObject* ObjPtr, TomoInputs* InpPtr)
{
Real_t cost=0, forward=0, prior=0;
Real_t Diff;
int32_t j,k,p,cidx,slice;
bool j_minus, k_minus, j_plus, k_plus, p_plus;
/* #pragma omp parallel for private(j, k, sino_idx, slice)*/
/* Forward (data-fit) term: quadratic penalty on the potential residual. */
for (slice=0; slice<ObjPtr->N_z; slice++)
{
for (j=0; j<ObjPtr->N_y; j++)
{
for (k=0; k<ObjPtr->N_x; k++)
{
forward += InpPtr->ADMM_mu*ObjPtr->ErrorPotMag[slice][j][k][0]*ObjPtr->ErrorPotMag[slice][j][k][0];
forward += InpPtr->ADMM_mu*ObjPtr->ErrorPotMag[slice][j][k][1]*ObjPtr->ErrorPotMag[slice][j][k][1];
forward += InpPtr->ADMM_mu*ObjPtr->ErrorPotMag[slice][j][k][2]*ObjPtr->ErrorPotMag[slice][j][k][2];
}
}
}
forward /= 2.0;
/*When computing the cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weghting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, if for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. For same i and j, only the neighbor at k+1 is considred.*/
prior = 0;
/* #pragma omp parallel for private(Diff, p, j, k, j_minus, k_minus, p_plus, j_plus, k_plus, cidx) reduction(+:prior)*/
for (p = 0; p < ObjPtr->N_z; p++)
for (j = 0; j < ObjPtr->N_y; j++)
{
for (k = 0; k < ObjPtr->N_x; k++)
{
/* Boundary flags guarding each neighbor offset used below. */
j_minus = (j - 1 >= 0)? true : false;
k_minus = (k - 1 >= 0)? true : false;
p_plus = (p + 1 < ObjPtr->N_z)? true : false;
j_plus = (j + 1 < ObjPtr->N_y)? true : false;
k_plus = (k + 1 < ObjPtr->N_x)? true : false;
/* Same slice, same row: only the neighbor at k+1. */
if(k_plus == true) {
for (cidx = 0; cidx < 3; cidx++){
Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j][k + 1][cidx]);
prior += InpPtr->Spatial_Filter[1][1][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
/* Same slice, next row (j+1): neighbors at k-1, k, k+1. */
if(j_plus == true) {
if(k_minus == true) {
for (cidx = 0; cidx < 3; cidx++){
Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k - 1][cidx]);
prior += InpPtr->Spatial_Filter[1][2][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
for (cidx = 0; cidx < 3; cidx++){
Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k][cidx]);
prior += InpPtr->Spatial_Filter[1][2][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
if(k_plus == true) {
for (cidx = 0; cidx < 3; cidx++){
Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k + 1][cidx]);
prior += InpPtr->Spatial_Filter[1][2][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
}
/* Next slice (p+1): all nine (j-1..j+1, k-1..k+1) neighbors. */
if (p_plus == true)
{
if(j_minus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k][cidx];
prior += InpPtr->Spatial_Filter[2][0][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p+1][j][k][cidx];
prior += InpPtr->Spatial_Filter[2][1][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
if(j_plus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p+1][j + 1][k][cidx];
prior += InpPtr->Spatial_Filter[2][2][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
if(j_minus == true)
{
if(k_minus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k - 1][cidx];
prior += InpPtr->Spatial_Filter[2][0][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
if(k_plus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k + 1][cidx];
prior += InpPtr->Spatial_Filter[2][0][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
}
if(k_minus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j][k - 1][cidx];
prior += InpPtr->Spatial_Filter[2][1][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
if(j_plus == true)
{
if(k_minus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j + 1][k - 1][cidx];
prior += InpPtr->Spatial_Filter[2][2][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
if(k_plus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j + 1][k + 1][cidx];
prior += InpPtr->Spatial_Filter[2][2][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
}
if(k_plus == true)
{
for (cidx = 0; cidx < 3; cidx++){
Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j][k + 1][cidx];
prior += InpPtr->Spatial_Filter[2][1][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
}
}
}
}
}
check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Mag Density Update Forward cost = %f\n",forward);
check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Mag Density Update Prior cost = %f\n",prior);
cost = forward + prior;
return cost;
}
/* Reconstructs the scalar electrostatic charge density by steepest gradient
   descent on the ADMM cost (see computeElecDensityCost).  Mirrors
   reconstruct_magnetization, with a non-negativity clamp on the density.
   Stops when the average relative update drops below DensUpdate_thresh
   (checked from iteration 2 onward) or after DensUpdate_MaxIter iterations. */
void reconstruct_elecchargedens (ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr)
{
int32_t i, Iter, j, k;
Real_t ElecUpdate = 0, ElecSum = 0, alpha_elec, cost, cost_old, charge_old;
Real_arr_t ***grad_elec;
grad_elec = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x);
cost_old = computeElecDensityCost(ObjPtr, InpPtr);
fprintf(InpPtr->debug_file_ptr, "------------ Elec Steep Grad Descent Iter = 0, cost = %e --------------\n", cost_old);
for (Iter = 1; Iter < InpPtr->DensUpdate_MaxIter; Iter++)
{
/* Gradient of the cost and step size for this iteration. */
compute_elec_gradstep(ObjPtr, InpPtr, fftptr, grad_elec, &alpha_elec);
ElecUpdate = 0; ElecSum = 0;
for (i = 0; i < ObjPtr->N_z; i++)
for (j = 0; j < ObjPtr->N_y; j++)
for (k = 0; k < ObjPtr->N_x; k++)
{
charge_old = ObjPtr->ChargeDensity[i][j][k];
ObjPtr->ChargeDensity[i][j][k] -= alpha_elec*grad_elec[i][j][k];
/* Clamp: the density is constrained to be non-negative. */
if (ObjPtr->ChargeDensity[i][j][k] <= 0)
ObjPtr->ChargeDensity[i][j][k] = 0;
ElecUpdate += fabs(ObjPtr->ChargeDensity[i][j][k] - charge_old);
ElecSum += fabs(ObjPtr->ChargeDensity[i][j][k]);
}
/* Average update as a percentage of total charge; EPSILON_ERROR avoids /0. */
ElecUpdate = ElecUpdate*100/(ElecSum + EPSILON_ERROR);
compute_elecprodtran (ObjPtr->ChargeDensity, ObjPtr->ErrorPotElec, ObjPtr->ElecFilt, fftptr, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 1);
for (i = 0; i < ObjPtr->N_z; i++)
for (j = 0; j < ObjPtr->N_y; j++)
for (k = 0; k < ObjPtr->N_x; k++)
{
/* Residual: target potential (primal minus dual) minus computed potential. */
ObjPtr->ErrorPotElec[i][j][k] = ObjPtr->ElecPotentials[i][j][k] - ObjPtr->ElecPotDual[i][j][k] - ObjPtr->ErrorPotElec[i][j][k];
}
cost = computeElecDensityCost(ObjPtr, InpPtr);
if (cost > cost_old)
check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "ERROR: Cost increased when updating charge density.\n");
cost_old = cost;
fprintf(InpPtr->debug_file_ptr, "------------ Elec Steep Grad Descent Iter = %d, cost = %e, Avg update as percentage (elec) = (%e), update = (%e), sum = (%e)--------------\n", Iter, cost, ElecUpdate, ElecUpdate, ElecSum);
if (Iter > 1 && ElecUpdate < InpPtr->DensUpdate_thresh)
{
fprintf(InpPtr->debug_file_ptr, "******* Elec Steepest gradient descent algorithm has converged! *********\n");
break;
}
}
multifree(grad_elec, 3);
}
/* Computes the ADMM cost for the charge-density update:
   forward term = (ADMM_mu/2) * ||ErrorPotElec||^2 over all voxels, plus a
   QGGMRF prior over the forward half of the 26-voxel neighborhood (each
   neighbor pair counted exactly once).  Scalar analog of computeMagDensityCost. */
Real_t computeElecDensityCost(ScannedObject* ObjPtr, TomoInputs* InpPtr)
{
Real_t cost=0, forward=0, prior=0;
Real_t Diff;
int32_t j,k,p,slice;
bool j_minus, k_minus, j_plus, k_plus, p_plus;
/* #pragma omp parallel for private(j, k, sino_idx, slice)*/
/* Forward (data-fit) term: quadratic penalty on the potential residual. */
for (slice=0; slice<ObjPtr->N_z; slice++)
{
for (j=0; j<ObjPtr->N_y; j++)
{
for (k=0; k<ObjPtr->N_x; k++)
{
forward += InpPtr->ADMM_mu*ObjPtr->ErrorPotElec[slice][j][k]*ObjPtr->ErrorPotElec[slice][j][k];
}
}
}
forward /= 2.0;
/*When computing the cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weghting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, if for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. For same i and j, only the neighbor at k+1 is considred.*/
prior = 0;
/* #pragma omp parallel for private(Diff, p, j, k, j_minus, k_minus, p_plus, j_plus, k_plus, cidx) reduction(+:prior)*/
for (p = 0; p < ObjPtr->N_z; p++)
for (j = 0; j < ObjPtr->N_y; j++)
{
for (k = 0; k < ObjPtr->N_x; k++)
{
/* Boundary flags guarding each neighbor offset used below. */
j_minus = (j - 1 >= 0)? true : false;
k_minus = (k - 1 >= 0)? true : false;
p_plus = (p + 1 < ObjPtr->N_z)? true : false;
j_plus = (j + 1 < ObjPtr->N_y)? true : false;
k_plus = (k + 1 < ObjPtr->N_x)? true : false;
/* Same slice, same row: only the neighbor at k+1. */
if(k_plus == true) {
Diff = (ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p][j][k + 1]);
prior += InpPtr->Spatial_Filter[1][1][2] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
/* Same slice, next row (j+1): neighbors at k-1, k, k+1. */
if(j_plus == true) {
if(k_minus == true) {
Diff = (ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p][j + 1][k - 1]);
prior += InpPtr->Spatial_Filter[1][2][0] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
Diff = (ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p][j + 1][k]);
prior += InpPtr->Spatial_Filter[1][2][1] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
if(k_plus == true) {
Diff = (ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p][j + 1][k + 1]);
prior += InpPtr->Spatial_Filter[1][2][2] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
}
/* Next slice (p+1): all nine (j-1..j+1, k-1..k+1) neighbors. */
if (p_plus == true)
{
if(j_minus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j - 1][k];
prior += InpPtr->Spatial_Filter[2][0][1] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p+1][j][k];
prior += InpPtr->Spatial_Filter[2][1][1] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
if(j_plus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p+1][j + 1][k];
prior += InpPtr->Spatial_Filter[2][2][1] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
if(j_minus == true)
{
if(k_minus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j - 1][k - 1];
prior += InpPtr->Spatial_Filter[2][0][0] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
if(k_plus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j - 1][k + 1];
prior += InpPtr->Spatial_Filter[2][0][2] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
}
if(k_minus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j][k - 1];
prior += InpPtr->Spatial_Filter[2][1][0] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
if(j_plus == true)
{
if(k_minus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j + 1][k - 1];
prior += InpPtr->Spatial_Filter[2][2][0] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
if(k_plus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j + 1][k + 1];
prior += InpPtr->Spatial_Filter[2][2][2] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
}
if(k_plus == true)
{
Diff = ObjPtr->ChargeDensity[p][j][k] - ObjPtr->ChargeDensity[p + 1][j][k + 1];
prior += InpPtr->Spatial_Filter[2][1][2] * QGGMRF_Value(Diff,InpPtr->Elec_Sigma_Q, InpPtr->Elec_Sigma_Q_P, ObjPtr->Elec_C);
}
}
}
}
check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Elec Density Update Forward cost = %f\n",forward);
check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Elec Density Update Prior cost = %f\n",prior);
cost = forward + prior;
return cost;
}
|
declare_variant_mixed_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=50 %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=50 | FileCheck %s --check-prefix HOST
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc -fopenmp-version=50
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -fopenmp-version=50 | FileCheck %s --check-prefix GPU
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -emit-pch -o %t -fopenmp-version=50
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - -fopenmp-version=50 | FileCheck %s --check-prefix GPU
// expected-no-diagnostics
// HOST: @base = alias i32 (double), i32 (double)* @hst
#ifndef HEADER
#define HEADER
int dev(double i) { return 0; } // GPU variant of base()
int hst(double i) { return 1; } // host variant of base()
#pragma omp declare variant(hst) match(device = {kind(host)})
#pragma omp declare variant(dev) match(device = {kind(gpu)})
int base(); // base function; the CHECK lines below pin which variant each call resolves to (trailing comments only: the test's CHECK patterns encode source line numbers)
// HOST-LABEL: define void @foo()
// HOST: call i32 (double, ...) bitcast (i32 (double)* @base to i32 (double, ...)*)(double -1.000000e+00)
// HOST: call i32 @hst(double -2.000000e+00)
// HOST: call void [[OFFL:@.+_foo_l29]]()
void foo() { // trailing comments only: CHECK pattern foo_l29 encodes the target directive's source line
base(-1); // host compilation: per the CHECK lines, base resolves via @base alias
hst(-2); // direct call, no variant dispatch
#pragma omp target
{
base(-3); // device compilation: per the GPU CHECK lines, resolves to @base (ret 0)
dev(-4); // direct call, no variant dispatch
}
}
// HOST: define {{.*}}void [[OFFL]]()
// HOST: call i32 (double, ...) bitcast (i32 (double)* @base to i32 (double, ...)*)(double -3.000000e+00)
// HOST: call i32 @dev(double -4.000000e+00)
// GPU: define {{.*}}void @__omp_offloading_{{.+}}_foo_l29()
// GPU: call i32 @base(double -3.000000e+00)
// GPU: call i32 @dev(double -4.000000e+00)
// GPU: define {{.*}}i32 @base(double
// GPU: ret i32 0
// GPU: define {{.*}}i32 @dev(double
// GPU: ret i32 0
#endif // HEADER
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.